.. | .. |
---|
8 | 8 | |
---|
9 | 9 | #include <linux/crc32.h> |
---|
10 | 10 | #include <linux/delay.h> |
---|
| 11 | +#include <linux/dmaengine.h> |
---|
11 | 12 | #include <linux/io.h> |
---|
12 | 13 | #include <linux/module.h> |
---|
13 | 14 | #include <linux/slab.h> |
---|
.. | .. |
---|
39 | 40 | #define STATUS_SRC_ADDR_INVALID BIT(7) |
---|
40 | 41 | #define STATUS_DST_ADDR_INVALID BIT(8) |
---|
41 | 42 | |
---|
| 43 | +#define FLAG_USE_DMA BIT(0) |
---|
| 44 | + |
---|
42 | 45 | #define TIMER_RESOLUTION 1 |
---|
43 | 46 | |
---|
44 | 47 | static struct workqueue_struct *kpcitest_workqueue; |
---|
45 | 48 | |
---|
/* Per-function private state of the PCI endpoint test function driver. */
struct pci_epf_test {
	void *reg[PCI_STD_NUM_BARS];	/* kernel address of each BAR's backing memory */
	struct pci_epf *epf;		/* the endpoint function this state belongs to */
	enum pci_barno test_reg_bar;	/* BAR that holds struct pci_epf_test_reg */
	size_t msix_table_offset;	/* offset of the MSI-X table inside test_reg_bar */
	struct delayed_work cmd_handler;	/* deferred work that services host commands */
	struct dma_chan *dma_chan;	/* memcpy-capable dmaengine channel, if acquired */
	struct completion transfer_complete;	/* signalled by the DMA completion callback */
	bool dma_supported;		/* false when no DMA channel could be obtained */
	const struct pci_epc_features *epc_features;	/* capabilities of the underlying EPC */
};
---|
54 | 60 | |
---|
55 | 61 | struct pci_epf_test_reg { |
---|
.. | .. |
---|
62 | 68 | u32 checksum; |
---|
63 | 69 | u32 irq_type; |
---|
64 | 70 | u32 irq_number; |
---|
| 71 | + u32 flags; |
---|
65 | 72 | } __packed; |
---|
66 | 73 | |
---|
67 | 74 | static struct pci_epf_header test_header = { |
---|
.. | .. |
---|
71 | 78 | .interrupt_pin = PCI_INTERRUPT_INTA, |
---|
72 | 79 | }; |
---|
73 | 80 | |
---|
74 | | -struct pci_epf_test_data { |
---|
75 | | - enum pci_barno test_reg_bar; |
---|
76 | | - bool linkup_notifier; |
---|
77 | | -}; |
---|
78 | | - |
---|
79 | 81 | static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; |
---|
| 82 | + |
---|
/*
 * DMA completion callback: wakes the thread sleeping in
 * pci_epf_test_data_transfer() on @transfer_complete.
 */
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;

	complete(&epf_test->transfer_complete);
}
---|
| 89 | + |
---|
| 90 | +/** |
---|
| 91 | + * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer |
---|
| 92 | + * data between PCIe EP and remote PCIe RC |
---|
| 93 | + * @epf_test: the EPF test device that performs the data transfer operation |
---|
| 94 | + * @dma_dst: The destination address of the data transfer. It can be a physical |
---|
| 95 | + * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. |
---|
| 96 | + * @dma_src: The source address of the data transfer. It can be a physical |
---|
| 97 | + * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. |
---|
| 98 | + * @len: The size of the data transfer |
---|
| 99 | + * |
---|
| 100 | + * Function that uses dmaengine API to transfer data between PCIe EP and remote |
---|
| 101 | + * PCIe RC. The source and destination address can be a physical address given |
---|
| 102 | + * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs. |
---|
| 103 | + * |
---|
| 104 | + * The function returns '0' on success and negative value on failure. |
---|
| 105 | + */ |
---|
| 106 | +static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test, |
---|
| 107 | + dma_addr_t dma_dst, dma_addr_t dma_src, |
---|
| 108 | + size_t len) |
---|
| 109 | +{ |
---|
| 110 | + enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
---|
| 111 | + struct dma_chan *chan = epf_test->dma_chan; |
---|
| 112 | + struct pci_epf *epf = epf_test->epf; |
---|
| 113 | + struct dma_async_tx_descriptor *tx; |
---|
| 114 | + struct device *dev = &epf->dev; |
---|
| 115 | + dma_cookie_t cookie; |
---|
| 116 | + int ret; |
---|
| 117 | + |
---|
| 118 | + if (IS_ERR_OR_NULL(chan)) { |
---|
| 119 | + dev_err(dev, "Invalid DMA memcpy channel\n"); |
---|
| 120 | + return -EINVAL; |
---|
| 121 | + } |
---|
| 122 | + |
---|
| 123 | + tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags); |
---|
| 124 | + if (!tx) { |
---|
| 125 | + dev_err(dev, "Failed to prepare DMA memcpy\n"); |
---|
| 126 | + return -EIO; |
---|
| 127 | + } |
---|
| 128 | + |
---|
| 129 | + tx->callback = pci_epf_test_dma_callback; |
---|
| 130 | + tx->callback_param = epf_test; |
---|
| 131 | + cookie = tx->tx_submit(tx); |
---|
| 132 | + reinit_completion(&epf_test->transfer_complete); |
---|
| 133 | + |
---|
| 134 | + ret = dma_submit_error(cookie); |
---|
| 135 | + if (ret) { |
---|
| 136 | + dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie); |
---|
| 137 | + return -EIO; |
---|
| 138 | + } |
---|
| 139 | + |
---|
| 140 | + dma_async_issue_pending(chan); |
---|
| 141 | + ret = wait_for_completion_interruptible(&epf_test->transfer_complete); |
---|
| 142 | + if (ret < 0) { |
---|
| 143 | + dmaengine_terminate_sync(chan); |
---|
| 144 | + dev_err(dev, "DMA wait_for_completion_timeout\n"); |
---|
| 145 | + return -ETIMEDOUT; |
---|
| 146 | + } |
---|
| 147 | + |
---|
| 148 | + return 0; |
---|
| 149 | +} |
---|
| 150 | + |
---|
| 151 | +/** |
---|
| 152 | + * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel |
---|
| 153 | + * @epf_test: the EPF test device that performs data transfer operation |
---|
| 154 | + * |
---|
| 155 | + * Function to initialize EPF test DMA channel. |
---|
| 156 | + */ |
---|
| 157 | +static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) |
---|
| 158 | +{ |
---|
| 159 | + struct pci_epf *epf = epf_test->epf; |
---|
| 160 | + struct device *dev = &epf->dev; |
---|
| 161 | + struct dma_chan *dma_chan; |
---|
| 162 | + dma_cap_mask_t mask; |
---|
| 163 | + int ret; |
---|
| 164 | + |
---|
| 165 | + dma_cap_zero(mask); |
---|
| 166 | + dma_cap_set(DMA_MEMCPY, mask); |
---|
| 167 | + |
---|
| 168 | + dma_chan = dma_request_chan_by_mask(&mask); |
---|
| 169 | + if (IS_ERR(dma_chan)) { |
---|
| 170 | + ret = PTR_ERR(dma_chan); |
---|
| 171 | + if (ret != -EPROBE_DEFER) |
---|
| 172 | + dev_err(dev, "Failed to get DMA channel\n"); |
---|
| 173 | + return ret; |
---|
| 174 | + } |
---|
| 175 | + init_completion(&epf_test->transfer_complete); |
---|
| 176 | + |
---|
| 177 | + epf_test->dma_chan = dma_chan; |
---|
| 178 | + |
---|
| 179 | + return 0; |
---|
| 180 | +} |
---|
| 181 | + |
---|
| 182 | +/** |
---|
| 183 | + * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel |
---|
| 184 | + * @epf_test: the EPF test device that performs data transfer operation |
---|
| 185 | + * |
---|
| 186 | + * Helper to cleanup EPF test DMA channel. |
---|
| 187 | + */ |
---|
| 188 | +static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) |
---|
| 189 | +{ |
---|
| 190 | + if (!epf_test->dma_supported) |
---|
| 191 | + return; |
---|
| 192 | + |
---|
| 193 | + dma_release_channel(epf_test->dma_chan); |
---|
| 194 | + epf_test->dma_chan = NULL; |
---|
| 195 | +} |
---|
| 196 | + |
---|
| 197 | +static void pci_epf_test_print_rate(const char *ops, u64 size, |
---|
| 198 | + struct timespec64 *start, |
---|
| 199 | + struct timespec64 *end, bool dma) |
---|
| 200 | +{ |
---|
| 201 | + struct timespec64 ts; |
---|
| 202 | + u64 rate, ns; |
---|
| 203 | + |
---|
| 204 | + ts = timespec64_sub(*end, *start); |
---|
| 205 | + |
---|
| 206 | + /* convert both size (stored in 'rate') and time in terms of 'ns' */ |
---|
| 207 | + ns = timespec64_to_ns(&ts); |
---|
| 208 | + rate = size * NSEC_PER_SEC; |
---|
| 209 | + |
---|
| 210 | + /* Divide both size (stored in 'rate') and ns by a common factor */ |
---|
| 211 | + while (ns > UINT_MAX) { |
---|
| 212 | + rate >>= 1; |
---|
| 213 | + ns >>= 1; |
---|
| 214 | + } |
---|
| 215 | + |
---|
| 216 | + if (!ns) |
---|
| 217 | + return; |
---|
| 218 | + |
---|
| 219 | + /* calculate the rate */ |
---|
| 220 | + do_div(rate, (uint32_t)ns); |
---|
| 221 | + |
---|
| 222 | + pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t" |
---|
| 223 | + "Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO", |
---|
| 224 | + (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024); |
---|
| 225 | +} |
---|
80 | 226 | |
---|
81 | 227 | static int pci_epf_test_copy(struct pci_epf_test *epf_test) |
---|
82 | 228 | { |
---|
83 | 229 | int ret; |
---|
| 230 | + bool use_dma; |
---|
84 | 231 | void __iomem *src_addr; |
---|
85 | 232 | void __iomem *dst_addr; |
---|
86 | 233 | phys_addr_t src_phys_addr; |
---|
87 | 234 | phys_addr_t dst_phys_addr; |
---|
| 235 | + struct timespec64 start, end; |
---|
88 | 236 | struct pci_epf *epf = epf_test->epf; |
---|
89 | 237 | struct device *dev = &epf->dev; |
---|
90 | 238 | struct pci_epc *epc = epf->epc; |
---|
.. | .. |
---|
123 | 271 | goto err_dst_addr; |
---|
124 | 272 | } |
---|
125 | 273 | |
---|
126 | | - memcpy(dst_addr, src_addr, reg->size); |
---|
| 274 | + ktime_get_ts64(&start); |
---|
| 275 | + use_dma = !!(reg->flags & FLAG_USE_DMA); |
---|
| 276 | + if (use_dma) { |
---|
| 277 | + if (!epf_test->dma_supported) { |
---|
| 278 | + dev_err(dev, "Cannot transfer data using DMA\n"); |
---|
| 279 | + ret = -EINVAL; |
---|
| 280 | + goto err_map_addr; |
---|
| 281 | + } |
---|
127 | 282 | |
---|
| 283 | + ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, |
---|
| 284 | + src_phys_addr, reg->size); |
---|
| 285 | + if (ret) |
---|
| 286 | + dev_err(dev, "Data transfer failed\n"); |
---|
| 287 | + } else { |
---|
| 288 | + void *buf; |
---|
| 289 | + |
---|
| 290 | + buf = kzalloc(reg->size, GFP_KERNEL); |
---|
| 291 | + if (!buf) { |
---|
| 292 | + ret = -ENOMEM; |
---|
| 293 | + goto err_map_addr; |
---|
| 294 | + } |
---|
| 295 | + |
---|
| 296 | + memcpy_fromio(buf, src_addr, reg->size); |
---|
| 297 | + memcpy_toio(dst_addr, buf, reg->size); |
---|
| 298 | + kfree(buf); |
---|
| 299 | + } |
---|
| 300 | + ktime_get_ts64(&end); |
---|
| 301 | + pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); |
---|
| 302 | + |
---|
| 303 | +err_map_addr: |
---|
128 | 304 | pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr); |
---|
129 | 305 | |
---|
130 | 306 | err_dst_addr: |
---|
.. | .. |
---|
146 | 322 | void __iomem *src_addr; |
---|
147 | 323 | void *buf; |
---|
148 | 324 | u32 crc32; |
---|
| 325 | + bool use_dma; |
---|
149 | 326 | phys_addr_t phys_addr; |
---|
| 327 | + phys_addr_t dst_phys_addr; |
---|
| 328 | + struct timespec64 start, end; |
---|
150 | 329 | struct pci_epf *epf = epf_test->epf; |
---|
151 | 330 | struct device *dev = &epf->dev; |
---|
152 | 331 | struct pci_epc *epc = epf->epc; |
---|
| 332 | + struct device *dma_dev = epf->epc->dev.parent; |
---|
153 | 333 | enum pci_barno test_reg_bar = epf_test->test_reg_bar; |
---|
154 | 334 | struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; |
---|
155 | 335 | |
---|
.. | .. |
---|
175 | 355 | goto err_map_addr; |
---|
176 | 356 | } |
---|
177 | 357 | |
---|
178 | | - memcpy_fromio(buf, src_addr, reg->size); |
---|
| 358 | + use_dma = !!(reg->flags & FLAG_USE_DMA); |
---|
| 359 | + if (use_dma) { |
---|
| 360 | + if (!epf_test->dma_supported) { |
---|
| 361 | + dev_err(dev, "Cannot transfer data using DMA\n"); |
---|
| 362 | + ret = -EINVAL; |
---|
| 363 | + goto err_dma_map; |
---|
| 364 | + } |
---|
| 365 | + |
---|
| 366 | + dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, |
---|
| 367 | + DMA_FROM_DEVICE); |
---|
| 368 | + if (dma_mapping_error(dma_dev, dst_phys_addr)) { |
---|
| 369 | + dev_err(dev, "Failed to map destination buffer addr\n"); |
---|
| 370 | + ret = -ENOMEM; |
---|
| 371 | + goto err_dma_map; |
---|
| 372 | + } |
---|
| 373 | + |
---|
| 374 | + ktime_get_ts64(&start); |
---|
| 375 | + ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, |
---|
| 376 | + phys_addr, reg->size); |
---|
| 377 | + if (ret) |
---|
| 378 | + dev_err(dev, "Data transfer failed\n"); |
---|
| 379 | + ktime_get_ts64(&end); |
---|
| 380 | + |
---|
| 381 | + dma_unmap_single(dma_dev, dst_phys_addr, reg->size, |
---|
| 382 | + DMA_FROM_DEVICE); |
---|
| 383 | + } else { |
---|
| 384 | + ktime_get_ts64(&start); |
---|
| 385 | + memcpy_fromio(buf, src_addr, reg->size); |
---|
| 386 | + ktime_get_ts64(&end); |
---|
| 387 | + } |
---|
| 388 | + |
---|
| 389 | + pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma); |
---|
179 | 390 | |
---|
180 | 391 | crc32 = crc32_le(~0, buf, reg->size); |
---|
181 | 392 | if (crc32 != reg->checksum) |
---|
182 | 393 | ret = -EIO; |
---|
183 | 394 | |
---|
| 395 | +err_dma_map: |
---|
184 | 396 | kfree(buf); |
---|
185 | 397 | |
---|
186 | 398 | err_map_addr: |
---|
.. | .. |
---|
198 | 410 | int ret; |
---|
199 | 411 | void __iomem *dst_addr; |
---|
200 | 412 | void *buf; |
---|
| 413 | + bool use_dma; |
---|
201 | 414 | phys_addr_t phys_addr; |
---|
| 415 | + phys_addr_t src_phys_addr; |
---|
| 416 | + struct timespec64 start, end; |
---|
202 | 417 | struct pci_epf *epf = epf_test->epf; |
---|
203 | 418 | struct device *dev = &epf->dev; |
---|
204 | 419 | struct pci_epc *epc = epf->epc; |
---|
| 420 | + struct device *dma_dev = epf->epc->dev.parent; |
---|
205 | 421 | enum pci_barno test_reg_bar = epf_test->test_reg_bar; |
---|
206 | 422 | struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; |
---|
207 | 423 | |
---|
.. | .. |
---|
230 | 446 | get_random_bytes(buf, reg->size); |
---|
231 | 447 | reg->checksum = crc32_le(~0, buf, reg->size); |
---|
232 | 448 | |
---|
233 | | - memcpy_toio(dst_addr, buf, reg->size); |
---|
| 449 | + use_dma = !!(reg->flags & FLAG_USE_DMA); |
---|
| 450 | + if (use_dma) { |
---|
| 451 | + if (!epf_test->dma_supported) { |
---|
| 452 | + dev_err(dev, "Cannot transfer data using DMA\n"); |
---|
| 453 | + ret = -EINVAL; |
---|
| 454 | + goto err_dma_map; |
---|
| 455 | + } |
---|
| 456 | + |
---|
| 457 | + src_phys_addr = dma_map_single(dma_dev, buf, reg->size, |
---|
| 458 | + DMA_TO_DEVICE); |
---|
| 459 | + if (dma_mapping_error(dma_dev, src_phys_addr)) { |
---|
| 460 | + dev_err(dev, "Failed to map source buffer addr\n"); |
---|
| 461 | + ret = -ENOMEM; |
---|
| 462 | + goto err_dma_map; |
---|
| 463 | + } |
---|
| 464 | + |
---|
| 465 | + ktime_get_ts64(&start); |
---|
| 466 | + ret = pci_epf_test_data_transfer(epf_test, phys_addr, |
---|
| 467 | + src_phys_addr, reg->size); |
---|
| 468 | + if (ret) |
---|
| 469 | + dev_err(dev, "Data transfer failed\n"); |
---|
| 470 | + ktime_get_ts64(&end); |
---|
| 471 | + |
---|
| 472 | + dma_unmap_single(dma_dev, src_phys_addr, reg->size, |
---|
| 473 | + DMA_TO_DEVICE); |
---|
| 474 | + } else { |
---|
| 475 | + ktime_get_ts64(&start); |
---|
| 476 | + memcpy_toio(dst_addr, buf, reg->size); |
---|
| 477 | + ktime_get_ts64(&end); |
---|
| 478 | + } |
---|
| 479 | + |
---|
| 480 | + pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma); |
---|
234 | 481 | |
---|
235 | 482 | /* |
---|
236 | 483 | * wait 1ms inorder for the write to complete. Without this delay L3 |
---|
.. | .. |
---|
238 | 485 | */ |
---|
239 | 486 | usleep_range(1000, 2000); |
---|
240 | 487 | |
---|
| 488 | +err_dma_map: |
---|
241 | 489 | kfree(buf); |
---|
242 | 490 | |
---|
243 | 491 | err_map_addr: |
---|
.. | .. |
---|
366 | 614 | msecs_to_jiffies(1)); |
---|
367 | 615 | } |
---|
368 | 616 | |
---|
369 | | -static void pci_epf_test_linkup(struct pci_epf *epf) |
---|
370 | | -{ |
---|
371 | | - struct pci_epf_test *epf_test = epf_get_drvdata(epf); |
---|
372 | | - |
---|
373 | | - queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, |
---|
374 | | - msecs_to_jiffies(1)); |
---|
375 | | -} |
---|
376 | | - |
---|
377 | 617 | static void pci_epf_test_unbind(struct pci_epf *epf) |
---|
378 | 618 | { |
---|
379 | 619 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); |
---|
.. | .. |
---|
382 | 622 | int bar; |
---|
383 | 623 | |
---|
384 | 624 | cancel_delayed_work(&epf_test->cmd_handler); |
---|
385 | | - pci_epc_stop(epc); |
---|
386 | | - for (bar = BAR_0; bar <= BAR_5; bar++) { |
---|
| 625 | + pci_epf_test_clean_dma_chan(epf_test); |
---|
| 626 | + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { |
---|
387 | 627 | epf_bar = &epf->bar[bar]; |
---|
388 | 628 | |
---|
389 | 629 | if (epf_test->reg[bar]) { |
---|
390 | | - pci_epf_free_space(epf, epf_test->reg[bar], bar); |
---|
391 | 630 | pci_epc_clear_bar(epc, epf->func_no, epf_bar); |
---|
| 631 | + pci_epf_free_space(epf, epf_test->reg[bar], bar); |
---|
392 | 632 | } |
---|
393 | 633 | } |
---|
394 | 634 | } |
---|
395 | 635 | |
---|
396 | 636 | static int pci_epf_test_set_bar(struct pci_epf *epf) |
---|
397 | 637 | { |
---|
398 | | - int bar; |
---|
| 638 | + int bar, add; |
---|
399 | 639 | int ret; |
---|
400 | 640 | struct pci_epf_bar *epf_bar; |
---|
401 | 641 | struct pci_epc *epc = epf->epc; |
---|
402 | 642 | struct device *dev = &epf->dev; |
---|
403 | 643 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); |
---|
404 | 644 | enum pci_barno test_reg_bar = epf_test->test_reg_bar; |
---|
| 645 | + const struct pci_epc_features *epc_features; |
---|
405 | 646 | |
---|
406 | | - for (bar = BAR_0; bar <= BAR_5; bar++) { |
---|
| 647 | + epc_features = epf_test->epc_features; |
---|
| 648 | + |
---|
| 649 | + for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { |
---|
407 | 650 | epf_bar = &epf->bar[bar]; |
---|
| 651 | + /* |
---|
| 652 | + * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 |
---|
| 653 | + * if the specific implementation required a 64-bit BAR, |
---|
| 654 | + * even if we only requested a 32-bit BAR. |
---|
| 655 | + */ |
---|
| 656 | + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; |
---|
408 | 657 | |
---|
409 | | - epf_bar->flags |= upper_32_bits(epf_bar->size) ? |
---|
410 | | - PCI_BASE_ADDRESS_MEM_TYPE_64 : |
---|
411 | | - PCI_BASE_ADDRESS_MEM_TYPE_32; |
---|
| 658 | + if (!!(epc_features->reserved_bar & (1 << bar))) |
---|
| 659 | + continue; |
---|
412 | 660 | |
---|
413 | 661 | ret = pci_epc_set_bar(epc, epf->func_no, epf_bar); |
---|
414 | 662 | if (ret) { |
---|
.. | .. |
---|
417 | 665 | if (bar == test_reg_bar) |
---|
418 | 666 | return ret; |
---|
419 | 667 | } |
---|
420 | | - /* |
---|
421 | | - * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 |
---|
422 | | - * if the specific implementation required a 64-bit BAR, |
---|
423 | | - * even if we only requested a 32-bit BAR. |
---|
424 | | - */ |
---|
425 | | - if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) |
---|
426 | | - bar++; |
---|
427 | 668 | } |
---|
428 | 669 | |
---|
429 | 670 | return 0; |
---|
| 671 | +} |
---|
| 672 | + |
---|
| 673 | +static int pci_epf_test_core_init(struct pci_epf *epf) |
---|
| 674 | +{ |
---|
| 675 | + struct pci_epf_test *epf_test = epf_get_drvdata(epf); |
---|
| 676 | + struct pci_epf_header *header = epf->header; |
---|
| 677 | + const struct pci_epc_features *epc_features; |
---|
| 678 | + struct pci_epc *epc = epf->epc; |
---|
| 679 | + struct device *dev = &epf->dev; |
---|
| 680 | + bool msix_capable = false; |
---|
| 681 | + bool msi_capable = true; |
---|
| 682 | + int ret; |
---|
| 683 | + |
---|
| 684 | + epc_features = pci_epc_get_features(epc, epf->func_no); |
---|
| 685 | + if (epc_features) { |
---|
| 686 | + msix_capable = epc_features->msix_capable; |
---|
| 687 | + msi_capable = epc_features->msi_capable; |
---|
| 688 | + } |
---|
| 689 | + |
---|
| 690 | + ret = pci_epc_write_header(epc, epf->func_no, header); |
---|
| 691 | + if (ret) { |
---|
| 692 | + dev_err(dev, "Configuration header write failed\n"); |
---|
| 693 | + return ret; |
---|
| 694 | + } |
---|
| 695 | + |
---|
| 696 | + ret = pci_epf_test_set_bar(epf); |
---|
| 697 | + if (ret) |
---|
| 698 | + return ret; |
---|
| 699 | + |
---|
| 700 | + if (msi_capable) { |
---|
| 701 | + ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts); |
---|
| 702 | + if (ret) { |
---|
| 703 | + dev_err(dev, "MSI configuration failed\n"); |
---|
| 704 | + return ret; |
---|
| 705 | + } |
---|
| 706 | + } |
---|
| 707 | + |
---|
| 708 | + if (msix_capable) { |
---|
| 709 | + ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts, |
---|
| 710 | + epf_test->test_reg_bar, |
---|
| 711 | + epf_test->msix_table_offset); |
---|
| 712 | + if (ret) { |
---|
| 713 | + dev_err(dev, "MSI-X configuration failed\n"); |
---|
| 714 | + return ret; |
---|
| 715 | + } |
---|
| 716 | + } |
---|
| 717 | + |
---|
| 718 | + return 0; |
---|
| 719 | +} |
---|
| 720 | + |
---|
| 721 | +static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val, |
---|
| 722 | + void *data) |
---|
| 723 | +{ |
---|
| 724 | + struct pci_epf *epf = container_of(nb, struct pci_epf, nb); |
---|
| 725 | + struct pci_epf_test *epf_test = epf_get_drvdata(epf); |
---|
| 726 | + int ret; |
---|
| 727 | + |
---|
| 728 | + switch (val) { |
---|
| 729 | + case CORE_INIT: |
---|
| 730 | + ret = pci_epf_test_core_init(epf); |
---|
| 731 | + if (ret) |
---|
| 732 | + return NOTIFY_BAD; |
---|
| 733 | + break; |
---|
| 734 | + |
---|
| 735 | + case LINK_UP: |
---|
| 736 | + queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, |
---|
| 737 | + msecs_to_jiffies(1)); |
---|
| 738 | + break; |
---|
| 739 | + |
---|
| 740 | + default: |
---|
| 741 | + dev_err(&epf->dev, "Invalid EPF test notifier event\n"); |
---|
| 742 | + return NOTIFY_BAD; |
---|
| 743 | + } |
---|
| 744 | + |
---|
| 745 | + return NOTIFY_OK; |
---|
430 | 746 | } |
---|
431 | 747 | |
---|
432 | 748 | static int pci_epf_test_alloc_space(struct pci_epf *epf) |
---|
433 | 749 | { |
---|
434 | 750 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); |
---|
435 | 751 | struct device *dev = &epf->dev; |
---|
| 752 | + struct pci_epf_bar *epf_bar; |
---|
| 753 | + size_t msix_table_size = 0; |
---|
| 754 | + size_t test_reg_bar_size; |
---|
| 755 | + size_t pba_size = 0; |
---|
| 756 | + bool msix_capable; |
---|
436 | 757 | void *base; |
---|
437 | | - int bar; |
---|
| 758 | + int bar, add; |
---|
438 | 759 | enum pci_barno test_reg_bar = epf_test->test_reg_bar; |
---|
| 760 | + const struct pci_epc_features *epc_features; |
---|
| 761 | + size_t test_reg_size; |
---|
439 | 762 | |
---|
440 | | - base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), |
---|
441 | | - test_reg_bar); |
---|
| 763 | + epc_features = epf_test->epc_features; |
---|
| 764 | + |
---|
| 765 | + test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128); |
---|
| 766 | + |
---|
| 767 | + msix_capable = epc_features->msix_capable; |
---|
| 768 | + if (msix_capable) { |
---|
| 769 | + msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; |
---|
| 770 | + epf_test->msix_table_offset = test_reg_bar_size; |
---|
| 771 | + /* Align to QWORD or 8 Bytes */ |
---|
| 772 | + pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); |
---|
| 773 | + } |
---|
| 774 | + test_reg_size = test_reg_bar_size + msix_table_size + pba_size; |
---|
| 775 | + |
---|
| 776 | + if (epc_features->bar_fixed_size[test_reg_bar]) { |
---|
| 777 | + if (test_reg_size > bar_size[test_reg_bar]) |
---|
| 778 | + return -ENOMEM; |
---|
| 779 | + test_reg_size = bar_size[test_reg_bar]; |
---|
| 780 | + } |
---|
| 781 | + |
---|
| 782 | + base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, |
---|
| 783 | + epc_features->align); |
---|
442 | 784 | if (!base) { |
---|
443 | 785 | dev_err(dev, "Failed to allocated register space\n"); |
---|
444 | 786 | return -ENOMEM; |
---|
445 | 787 | } |
---|
446 | 788 | epf_test->reg[test_reg_bar] = base; |
---|
447 | 789 | |
---|
448 | | - for (bar = BAR_0; bar <= BAR_5; bar++) { |
---|
| 790 | + for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { |
---|
| 791 | + epf_bar = &epf->bar[bar]; |
---|
| 792 | + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; |
---|
| 793 | + |
---|
449 | 794 | if (bar == test_reg_bar) |
---|
450 | 795 | continue; |
---|
451 | | - base = pci_epf_alloc_space(epf, bar_size[bar], bar); |
---|
| 796 | + |
---|
| 797 | + if (!!(epc_features->reserved_bar & (1 << bar))) |
---|
| 798 | + continue; |
---|
| 799 | + |
---|
| 800 | + base = pci_epf_alloc_space(epf, bar_size[bar], bar, |
---|
| 801 | + epc_features->align); |
---|
452 | 802 | if (!base) |
---|
453 | 803 | dev_err(dev, "Failed to allocate space for BAR%d\n", |
---|
454 | 804 | bar); |
---|
.. | .. |
---|
458 | 808 | return 0; |
---|
459 | 809 | } |
---|
460 | 810 | |
---|
| 811 | +static void pci_epf_configure_bar(struct pci_epf *epf, |
---|
| 812 | + const struct pci_epc_features *epc_features) |
---|
| 813 | +{ |
---|
| 814 | + struct pci_epf_bar *epf_bar; |
---|
| 815 | + bool bar_fixed_64bit; |
---|
| 816 | + int i; |
---|
| 817 | + |
---|
| 818 | + for (i = 0; i < PCI_STD_NUM_BARS; i++) { |
---|
| 819 | + epf_bar = &epf->bar[i]; |
---|
| 820 | + bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i)); |
---|
| 821 | + if (bar_fixed_64bit) |
---|
| 822 | + epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; |
---|
| 823 | + if (epc_features->bar_fixed_size[i]) |
---|
| 824 | + bar_size[i] = epc_features->bar_fixed_size[i]; |
---|
| 825 | + } |
---|
| 826 | +} |
---|
| 827 | + |
---|
461 | 828 | static int pci_epf_test_bind(struct pci_epf *epf) |
---|
462 | 829 | { |
---|
463 | 830 | int ret; |
---|
464 | 831 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); |
---|
465 | | - struct pci_epf_header *header = epf->header; |
---|
| 832 | + const struct pci_epc_features *epc_features; |
---|
| 833 | + enum pci_barno test_reg_bar = BAR_0; |
---|
466 | 834 | struct pci_epc *epc = epf->epc; |
---|
467 | | - struct device *dev = &epf->dev; |
---|
| 835 | + bool linkup_notifier = false; |
---|
| 836 | + bool core_init_notifier = false; |
---|
468 | 837 | |
---|
469 | 838 | if (WARN_ON_ONCE(!epc)) |
---|
470 | 839 | return -EINVAL; |
---|
471 | 840 | |
---|
472 | | - if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER) |
---|
473 | | - epf_test->linkup_notifier = false; |
---|
474 | | - else |
---|
475 | | - epf_test->linkup_notifier = true; |
---|
476 | | - |
---|
477 | | - epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE; |
---|
478 | | - |
---|
479 | | - epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features); |
---|
480 | | - |
---|
481 | | - ret = pci_epc_write_header(epc, epf->func_no, header); |
---|
482 | | - if (ret) { |
---|
483 | | - dev_err(dev, "Configuration header write failed\n"); |
---|
484 | | - return ret; |
---|
| 841 | + epc_features = pci_epc_get_features(epc, epf->func_no); |
---|
| 842 | + if (!epc_features) { |
---|
| 843 | + dev_err(&epf->dev, "epc_features not implemented\n"); |
---|
| 844 | + return -EOPNOTSUPP; |
---|
485 | 845 | } |
---|
| 846 | + |
---|
| 847 | + linkup_notifier = epc_features->linkup_notifier; |
---|
| 848 | + core_init_notifier = epc_features->core_init_notifier; |
---|
| 849 | + test_reg_bar = pci_epc_get_first_free_bar(epc_features); |
---|
| 850 | + if (test_reg_bar < 0) |
---|
| 851 | + return -EINVAL; |
---|
| 852 | + pci_epf_configure_bar(epf, epc_features); |
---|
| 853 | + |
---|
| 854 | + epf_test->test_reg_bar = test_reg_bar; |
---|
| 855 | + epf_test->epc_features = epc_features; |
---|
486 | 856 | |
---|
487 | 857 | ret = pci_epf_test_alloc_space(epf); |
---|
488 | 858 | if (ret) |
---|
489 | 859 | return ret; |
---|
490 | 860 | |
---|
491 | | - ret = pci_epf_test_set_bar(epf); |
---|
492 | | - if (ret) |
---|
493 | | - return ret; |
---|
494 | | - |
---|
495 | | - ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts); |
---|
496 | | - if (ret) { |
---|
497 | | - dev_err(dev, "MSI configuration failed\n"); |
---|
498 | | - return ret; |
---|
499 | | - } |
---|
500 | | - |
---|
501 | | - if (epf_test->msix_available) { |
---|
502 | | - ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts); |
---|
503 | | - if (ret) { |
---|
504 | | - dev_err(dev, "MSI-X configuration failed\n"); |
---|
| 861 | + if (!core_init_notifier) { |
---|
| 862 | + ret = pci_epf_test_core_init(epf); |
---|
| 863 | + if (ret) |
---|
505 | 864 | return ret; |
---|
506 | | - } |
---|
507 | 865 | } |
---|
508 | 866 | |
---|
509 | | - if (!epf_test->linkup_notifier) |
---|
| 867 | + epf_test->dma_supported = true; |
---|
| 868 | + |
---|
| 869 | + ret = pci_epf_test_init_dma_chan(epf_test); |
---|
| 870 | + if (ret) |
---|
| 871 | + epf_test->dma_supported = false; |
---|
| 872 | + |
---|
| 873 | + if (linkup_notifier || core_init_notifier) { |
---|
| 874 | + epf->nb.notifier_call = pci_epf_test_notifier; |
---|
| 875 | + pci_epc_register_notifier(epc, &epf->nb); |
---|
| 876 | + } else { |
---|
510 | 877 | queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); |
---|
| 878 | + } |
---|
511 | 879 | |
---|
512 | 880 | return 0; |
---|
513 | 881 | } |
---|
.. | .. |
---|
523 | 891 | { |
---|
524 | 892 | struct pci_epf_test *epf_test; |
---|
525 | 893 | struct device *dev = &epf->dev; |
---|
526 | | - const struct pci_epf_device_id *match; |
---|
527 | | - struct pci_epf_test_data *data; |
---|
528 | | - enum pci_barno test_reg_bar = BAR_0; |
---|
529 | | - bool linkup_notifier = true; |
---|
530 | | - |
---|
531 | | - match = pci_epf_match_device(pci_epf_test_ids, epf); |
---|
532 | | - data = (struct pci_epf_test_data *)match->driver_data; |
---|
533 | | - if (data) { |
---|
534 | | - test_reg_bar = data->test_reg_bar; |
---|
535 | | - linkup_notifier = data->linkup_notifier; |
---|
536 | | - } |
---|
537 | 894 | |
---|
538 | 895 | epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); |
---|
539 | 896 | if (!epf_test) |
---|
.. | .. |
---|
541 | 898 | |
---|
542 | 899 | epf->header = &test_header; |
---|
543 | 900 | epf_test->epf = epf; |
---|
544 | | - epf_test->test_reg_bar = test_reg_bar; |
---|
545 | | - epf_test->linkup_notifier = linkup_notifier; |
---|
546 | 901 | |
---|
547 | 902 | INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); |
---|
548 | 903 | |
---|
.. | .. |
---|
553 | 908 | static struct pci_epf_ops ops = { |
---|
554 | 909 | .unbind = pci_epf_test_unbind, |
---|
555 | 910 | .bind = pci_epf_test_bind, |
---|
556 | | - .linkup = pci_epf_test_linkup, |
---|
557 | 911 | }; |
---|
558 | 912 | |
---|
559 | 913 | static struct pci_epf_driver test_driver = { |
---|
.. | .. |
---|
570 | 924 | |
---|
571 | 925 | kpcitest_workqueue = alloc_workqueue("kpcitest", |
---|
572 | 926 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); |
---|
| 927 | + if (!kpcitest_workqueue) { |
---|
| 928 | + pr_err("Failed to allocate the kpcitest work queue\n"); |
---|
| 929 | + return -ENOMEM; |
---|
| 930 | + } |
---|
| 931 | + |
---|
573 | 932 | ret = pci_epf_register_driver(&test_driver); |
---|
574 | 933 | if (ret) { |
---|
575 | 934 | destroy_workqueue(kpcitest_workqueue); |
---|