...
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * PCI Endpoint *Controller* (EPC) library
  *
  * Copyright (C) 2017 Texas Instruments
...
---|
 EXPORT_SYMBOL_GPL(pci_epc_get);

 /**
+ * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
+ * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
+ *
+ * Invoke to get the first unreserved BAR that can be used by the endpoint
+ * function. Returns BAR_0 if @epc_features is invalid.
+ */
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
+{
+        return pci_epc_get_next_free_bar(epc_features, BAR_0);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
+
+/**
+ * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
+ * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
+ * @bar: the starting BAR number from where unreserved BAR should be searched
+ *
+ * Invoke to get the next unreserved BAR starting from @bar that can be used
+ * for the endpoint function. Returns BAR_0 if @epc_features is invalid.
+ */
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+                                         *epc_features, enum pci_barno bar)
+{
+        unsigned long free_bar;
+
+        if (!epc_features)
+                return BAR_0;
+
+        /* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
+        if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+                bar++;
+
+        /* Find if the reserved BAR is also a 64-bit BAR */
+        free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
+
+        /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
+        free_bar <<= 1;
+        free_bar |= epc_features->reserved_bar;
+
+        free_bar = find_next_zero_bit(&free_bar, 6, bar);
+        if (free_bar > 5)
+                return NO_BAR;
+
+        return free_bar;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
+
+/**
+ * pci_epc_get_features() - get the features supported by EPC
+ * @epc: the features supported by *this* EPC device will be returned
+ * @func_no: the features supported by the EPC device specific to the
+ *           endpoint function with func_no will be returned
+ *
+ * Invoke to get the features provided by the EPC which may be
+ * specific to an endpoint function. Returns pci_epc_features on success
+ * and NULL for any failures.
+ */
+const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
+                                                    u8 func_no)
+{
+        const struct pci_epc_features *epc_features;
+
+        if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+                return NULL;
+
+        if (!epc->ops->get_features)
+                return NULL;
+
+        mutex_lock(&epc->lock);
+        epc_features = epc->ops->get_features(epc, func_no);
+        mutex_unlock(&epc->lock);
+
+        return epc_features;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_features);
+
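Taken together, the helpers added above let an endpoint function (EPF) driver query the controller's constraints before it programs a BAR. The following is a minimal illustrative sketch, not part of this patch; the pci_epf_foo_pick_bar() name is hypothetical and the epc/epf pointers are assumed to come from the EPF driver's bind path:

/* Illustrative sketch (hypothetical EPF driver), not part of this patch */
static int pci_epf_foo_pick_bar(struct pci_epf *epf, struct pci_epc *epc,
                                enum pci_barno *bar)
{
        const struct pci_epc_features *epc_features;

        /* Ask the controller which BARs are reserved, fixed in size, or 64-bit */
        epc_features = pci_epc_get_features(epc, epf->func_no);
        if (!epc_features)
                return -EOPNOTSUPP;

        /* First BAR that is not set in epc_features->reserved_bar */
        *bar = pci_epc_get_first_free_bar(epc_features);
        if (*bar == NO_BAR)
                return -EINVAL;

        /* *bar can now be configured with pci_epc_set_bar() */
        return 0;
}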
---|
+/**
  * pci_epc_stop() - stop the PCI link
  * @epc: the link of the EPC device that has to be stopped
  *
...
  */
 void pci_epc_stop(struct pci_epc *epc)
 {
-        unsigned long flags;
-
         if (IS_ERR(epc) || !epc->ops->stop)
                 return;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         epc->ops->stop(epc);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);
 }
 EXPORT_SYMBOL_GPL(pci_epc_stop);

...
 int pci_epc_start(struct pci_epc *epc)
 {
         int ret;
-        unsigned long flags;

         if (IS_ERR(epc))
                 return -EINVAL;
...
         if (!epc->ops->start)
                 return 0;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         ret = epc->ops->start(epc);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);

         return ret;
 }
...
                       enum pci_epc_irq_type type, u16 interrupt_num)
 {
         int ret;
-        unsigned long flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
                 return -EINVAL;
...
         if (!epc->ops->raise_irq)
                 return 0;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);

         return ret;
 }
...
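For context, an EPF driver raises interrupts to the host through this helper once the link is up; note that after this patch the EPC core takes a mutex here, so the call must come from a context that may sleep. An illustrative fragment (not from this patch, values made up):

        /* Illustrative fragment, not part of this patch */
        ret = pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, 1);
        if (ret)
                dev_err(&epf->dev, "failed to raise MSI to the host\n");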
---|
 int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
 {
         int interrupt;
-        unsigned long flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
                 return 0;
...
         if (!epc->ops->get_msi)
                 return 0;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         interrupt = epc->ops->get_msi(epc, func_no);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);

         if (interrupt < 0)
                 return 0;
...
 {
         int ret;
         u8 encode_int;
-        unsigned long flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
             interrupts > 32)
...

         encode_int = order_base_2(interrupts);

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         ret = epc->ops->set_msi(epc, func_no, encode_int);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);

         return ret;
 }
...
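pci_epc_set_msi() takes the number of MSI vectors the function wants to advertise (up to 32) and forwards the log2-encoded value produced by order_base_2() to the controller driver, which matches the encoding of the MSI capability's Multiple Message Capable field. A hypothetical caller, for illustration only:

        /* Illustrative fragment, not part of this patch */
        ret = pci_epc_set_msi(epc, epf->func_no, 8);
        /* the core hands order_base_2(8) == 3 to the controller driver */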
---|
 int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
 {
         int interrupt;
-        unsigned long flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
                 return 0;
...
         if (!epc->ops->get_msix)
                 return 0;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         interrupt = epc->ops->get_msix(epc, func_no);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);

         if (interrupt < 0)
                 return 0;
...
  * @epc: the EPC device on which MSI-X has to be configured
  * @func_no: the endpoint function number in the EPC device
  * @interrupts: number of MSI-X interrupts required by the EPF
+ * @bir: BAR where the MSI-X table resides
+ * @offset: Offset pointing to the start of MSI-X table
  *
  * Invoke to set the required number of MSI-X interrupts.
  */
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+                     enum pci_barno bir, u32 offset)
 {
         int ret;
-        unsigned long flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
             interrupts < 1 || interrupts > 2048)
...
         if (!epc->ops->set_msix)
                 return 0;

-        spin_lock_irqsave(&epc->lock, flags);
-        ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_lock(&epc->lock);
+        ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset);
+        mutex_unlock(&epc->lock);

         return ret;
 }
...
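The new @bir and @offset arguments tell the controller driver in which BAR, and at which offset inside it, the EPF has placed its MSI-X table. An illustrative call (not part of this patch; BAR_2 and 0x800 are made-up values an EPF would normally derive from the space it allocated for the table):

        /* Illustrative fragment, not part of this patch */
        ret = pci_epc_set_msix(epc, epf->func_no, 16, BAR_2, 0x800);
        if (ret)
                dev_err(&epf->dev, "MSI-X configuration failed\n");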
---|
 void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
                         phys_addr_t phys_addr)
 {
-        unsigned long flags;
-
         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
                 return;

         if (!epc->ops->unmap_addr)
                 return;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         epc->ops->unmap_addr(epc, func_no, phys_addr);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);
 }
 EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

...
                      phys_addr_t phys_addr, u64 pci_addr, size_t size)
 {
         int ret;
-        unsigned long flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
                 return -EINVAL;
...
         if (!epc->ops->map_addr)
                 return 0;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);

         return ret;
 }
...
 void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
                        struct pci_epf_bar *epf_bar)
 {
-        unsigned long flags;
-
         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
             (epf_bar->barno == BAR_5 &&
              epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
...
         if (!epc->ops->clear_bar)
                 return;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         epc->ops->clear_bar(epc, func_no, epf_bar);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);
 }
 EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

...
---|
                     struct pci_epf_bar *epf_bar)
 {
         int ret;
-        unsigned long irq_flags;
         int flags = epf_bar->flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
...
         if (!epc->ops->set_bar)
                 return 0;

-        spin_lock_irqsave(&epc->lock, irq_flags);
+        mutex_lock(&epc->lock);
         ret = epc->ops->set_bar(epc, func_no, epf_bar);
-        spin_unlock_irqrestore(&epc->lock, irq_flags);
+        mutex_unlock(&epc->lock);

         return ret;
 }
...
                          struct pci_epf_header *header)
 {
         int ret;
-        unsigned long flags;

         if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
                 return -EINVAL;
...
         if (!epc->ops->write_header)
                 return 0;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
         ret = epc->ops->write_header(epc, func_no, header);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        mutex_unlock(&epc->lock);

         return ret;
 }
...
---|
  */
 int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
 {
-        unsigned long flags;
+        u32 func_no;
+        int ret = 0;

         if (epf->epc)
                 return -EBUSY;
...
         if (IS_ERR(epc))
                 return -EINVAL;

-        if (epf->func_no > epc->max_functions - 1)
-                return -EINVAL;
+        mutex_lock(&epc->lock);
+        func_no = find_first_zero_bit(&epc->function_num_map,
+                                      BITS_PER_LONG);
+        if (func_no >= BITS_PER_LONG) {
+                ret = -EINVAL;
+                goto ret;
+        }

+        if (func_no > epc->max_functions - 1) {
+                dev_err(&epc->dev, "Exceeding max supported Function Number\n");
+                ret = -EINVAL;
+                goto ret;
+        }
+
+        set_bit(func_no, &epc->function_num_map);
+        epf->func_no = func_no;
         epf->epc = epc;

-        spin_lock_irqsave(&epc->lock, flags);
         list_add_tail(&epf->list, &epc->pci_epf);
-        spin_unlock_irqrestore(&epc->lock, flags);

-        return 0;
+ret:
+        mutex_unlock(&epc->lock);
+
+        return ret;
 }
 EXPORT_SYMBOL_GPL(pci_epc_add_epf);

...
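With this change the function number is no longer supplied by the EPF driver: pci_epc_add_epf() allocates the lowest free slot in epc->function_num_map and writes it back into epf->func_no. A minimal sketch of the calling side (illustrative only, error handling trimmed):

        /* Illustrative fragment, not part of this patch */
        ret = pci_epc_add_epf(epc, epf);
        if (ret)
                return ret;

        /* epf->func_no now holds the function number chosen by the EPC core */
        dev_dbg(&epf->dev, "bound as function %u\n", epf->func_no);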
---|
  */
 void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
 {
-        unsigned long flags;
-
-        if (!epc || IS_ERR(epc))
+        if (!epc || IS_ERR(epc) || !epf)
                 return;

-        spin_lock_irqsave(&epc->lock, flags);
+        mutex_lock(&epc->lock);
+        clear_bit(epf->func_no, &epc->function_num_map);
         list_del(&epf->list);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        epf->epc = NULL;
+        mutex_unlock(&epc->lock);
 }
 EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

...
  */
 void pci_epc_linkup(struct pci_epc *epc)
 {
-        unsigned long flags;
-        struct pci_epf *epf;
-
         if (!epc || IS_ERR(epc))
                 return;

-        spin_lock_irqsave(&epc->lock, flags);
-        list_for_each_entry(epf, &epc->pci_epf, list)
-                pci_epf_linkup(epf);
-        spin_unlock_irqrestore(&epc->lock, flags);
+        atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
 }
 EXPORT_SYMBOL_GPL(pci_epc_linkup);
+
+/**
+ * pci_epc_init_notify() - Notify the EPF device that EPC device's core
+ *                         initialization is completed
+ * @epc: the EPC device whose core initialization is completed
+ *
+ * Invoke to notify the EPF device that the EPC device's initialization
+ * is completed.
+ */
+void pci_epc_init_notify(struct pci_epc *epc)
+{
+        if (!epc || IS_ERR(epc))
+                return;
+
+        atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_init_notify);
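EPF drivers consume these events by registering a notifier block on epc->notifier (the companion EPF-side patches typically wrap this in a pci_epc_register_notifier() helper). The sketch below is illustrative only and not part of this diff; the pci_epf_foo_* names are hypothetical:

/* Illustrative sketch (hypothetical EPF driver), not part of this patch */
static int pci_epf_foo_notifier(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        switch (val) {
        case CORE_INIT:
                /* (re)program config space header, BARs, MSI/MSI-X here */
                return NOTIFY_OK;
        case LINK_UP:
                /* link is up: start queueing work towards the host */
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block pci_epf_foo_nb = {
        .notifier_call = pci_epf_foo_notifier,
};

/* e.g. called from the EPF driver's bind path */
static int pci_epf_foo_register_events(struct pci_epc *epc)
{
        return atomic_notifier_chain_register(&epc->notifier, &pci_epf_foo_nb);
}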
---|

 /**
  * pci_epc_destroy() - destroy the EPC device
...
                 goto err_ret;
         }

-        spin_lock_init(&epc->lock);
+        mutex_init(&epc->lock);
         INIT_LIST_HEAD(&epc->pci_epf);
+        ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);

         device_initialize(&epc->dev);
         epc->dev.class = pci_epc_class;
---|