forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/drivers/misc/ocxl/link.c
@@ -6,6 +6,7 @@
 #include <linux/mmu_context.h>
 #include <asm/copro.h>
 #include <asm/pnv-ocxl.h>
+#include <asm/xive.h>
 #include <misc/ocxl.h>
 #include "ocxl_internal.h"
 #include "trace.h"
@@ -76,7 +77,7 @@
  * limited number of opencapi slots on a system and lookup is only
  * done when the device is probed
  */
-struct link {
+struct ocxl_link {
         struct list_head list;
         struct kref ref;
         int domain;
@@ -163,7 +164,7 @@
         if (fault->dsisr & SPA_XSL_S)
                 access |= _PAGE_WRITE;
 
-        if (REGION_ID(fault->dar) != USER_REGION_ID)
+        if (get_region_id(fault->dar) != USER_REGION_ID)
                 access |= _PAGE_PRIVILEGED;
 
         local_irq_save(flags);
@@ -179,12 +180,12 @@
 
 static irqreturn_t xsl_fault_handler(int irq, void *data)
 {
-        struct link *link = (struct link *) data;
+        struct ocxl_link *link = (struct ocxl_link *) data;
         struct spa *spa = link->spa;
         u64 dsisr, dar, pe_handle;
         struct pe_data *pe_data;
         struct ocxl_process_element *pe;
-        int lpid, pid, tid;
+        int pid;
         bool schedule = false;
 
         read_irq(spa, &dsisr, &dar, &pe_handle);
@@ -192,9 +193,7 @@
 
         WARN_ON(pe_handle > SPA_PE_MASK);
         pe = spa->spa_mem + pe_handle;
-        lpid = be32_to_cpu(pe->lpid);
         pid = be32_to_cpu(pe->pid);
-        tid = be32_to_cpu(pe->tid);
         /* We could be reading all null values here if the PE is being
          * removed while an interrupt kicks in. It's not supposed to
          * happen if the driver notified the AFU to terminate the
@@ -223,6 +222,17 @@
                  */
                 rcu_read_unlock();
                 pr_debug("Unknown mm context for xsl interrupt\n");
+                ack_irq(spa, ADDRESS_ERROR);
+                return IRQ_HANDLED;
+        }
+
+        if (!pe_data->mm) {
+                /*
+                 * translation fault from a kernel context - an OpenCAPI
+                 * device tried to access a bad kernel address
+                 */
+                rcu_read_unlock();
+                pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n");
                 ack_irq(spa, ADDRESS_ERROR);
                 return IRQ_HANDLED;
         }
@@ -256,7 +266,7 @@
                         &spa->reg_tfc, &spa->reg_pe_handle);
 }
 
-static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
+static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
 {
         struct spa *spa = link->spa;
         int rc;
@@ -273,9 +283,9 @@
         spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
                         link->domain, link->bus, link->dev);
         if (!spa->irq_name) {
-                unmap_irq_registers(spa);
                 dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
-                return -ENOMEM;
+                rc = -ENOMEM;
+                goto err_xsl;
         }
         /*
          * At some point, we'll need to look into allowing a higher
@@ -283,11 +293,10 @@
          */
         spa->virq = irq_create_mapping(NULL, hwirq);
         if (!spa->virq) {
-                kfree(spa->irq_name);
-                unmap_irq_registers(spa);
                 dev_err(&dev->dev,
                         "irq_create_mapping failed for translation interrupt\n");
-                return -EINVAL;
+                rc = -EINVAL;
+                goto err_name;
         }
 
         dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);
@@ -295,18 +304,24 @@
         rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
                         link);
         if (rc) {
-                irq_dispose_mapping(spa->virq);
-                kfree(spa->irq_name);
-                unmap_irq_registers(spa);
                 dev_err(&dev->dev,
                         "request_irq failed for translation interrupt: %d\n",
                         rc);
-                return -EINVAL;
+                rc = -EINVAL;
+                goto err_mapping;
         }
         return 0;
+
+err_mapping:
+        irq_dispose_mapping(spa->virq);
+err_name:
+        kfree(spa->irq_name);
+err_xsl:
+        unmap_irq_registers(spa);
+        return rc;
 }
 
-static void release_xsl_irq(struct link *link)
+static void release_xsl_irq(struct ocxl_link *link)
 {
         struct spa *spa = link->spa;
 
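
The three setup_xsl_irq() hunks above replace the duplicated cleanup calls in each error branch with a single unwind path at the bottom of the function. A minimal stand-alone sketch of the same goto-unwind idiom, acquiring two resources in order; the helper and its arguments are hypothetical, not code from this driver:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Sketch only: acquire in order, release in reverse order through one
 * chain of labels, so each error branch jumps to the first thing that
 * needs undoing.
 */
static int unwind_example(int id, void **out_buf, char **out_name)
{
        void *buf;
        char *name;
        int rc;

        buf = kzalloc(64, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;         /* nothing to undo yet */

        name = kasprintf(GFP_KERNEL, "example-%d", id);
        if (!name) {
                rc = -ENOMEM;
                goto err_buf;
        }

        if (id < 0) {                   /* stand-in for a failing request_irq() */
                rc = -EINVAL;
                goto err_name;
        }

        *out_buf = buf;
        *out_name = name;
        return 0;

err_name:
        kfree(name);
err_buf:
        kfree(buf);
        return rc;
}
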
@@ -318,7 +333,7 @@
         unmap_irq_registers(spa);
 }
 
-static int alloc_spa(struct pci_dev *dev, struct link *link)
+static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
 {
         struct spa *spa;
 
@@ -345,7 +360,7 @@
         return 0;
 }
 
-static void free_spa(struct link *link)
+static void free_spa(struct ocxl_link *link)
 {
         struct spa *spa = link->spa;
 
@@ -359,12 +374,12 @@
         }
 }
 
-static int alloc_link(struct pci_dev *dev, int PE_mask, struct link **out_link)
+static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link)
 {
-        struct link *link;
+        struct ocxl_link *link;
         int rc;
 
-        link = kzalloc(sizeof(struct link), GFP_KERNEL);
+        link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL);
         if (!link)
                 return -ENOMEM;
 
@@ -400,7 +415,7 @@
         return rc;
 }
 
-static void free_link(struct link *link)
+static void free_link(struct ocxl_link *link)
 {
         release_xsl_irq(link);
         free_spa(link);
@@ -410,7 +425,7 @@
 int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
 {
         int rc = 0;
-        struct link *link;
+        struct ocxl_link *link;
 
         mutex_lock(&links_list_lock);
         list_for_each_entry(link, &links_list, list) {
@@ -437,7 +452,7 @@
 
 static void release_xsl(struct kref *ref)
 {
-        struct link *link = container_of(ref, struct link, ref);
+        struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);
 
         list_del(&link->list);
         /* call platform code before releasing data */
@@ -447,7 +462,7 @@
 
 void ocxl_link_release(struct pci_dev *dev, void *link_handle)
 {
-        struct link *link = (struct link *) link_handle;
+        struct ocxl_link *link = (struct ocxl_link *) link_handle;
 
         mutex_lock(&links_list_lock);
         kref_put(&link->ref, release_xsl);
@@ -483,7 +498,7 @@
                 void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
                 void *xsl_err_data)
 {
-        struct link *link = (struct link *) link_handle;
+        struct ocxl_link *link = (struct ocxl_link *) link_handle;
         struct spa *spa = link->spa;
         struct ocxl_process_element *pe;
         int pe_handle, rc = 0;
@@ -520,7 +535,13 @@
         pe->amr = cpu_to_be64(amr);
         pe->software_state = cpu_to_be32(SPA_PE_VALID);
 
-        mm_context_add_copro(mm);
+        /*
+         * For user contexts, register a copro so that TLBIs are seen
+         * by the nest MMU. If we have a kernel context, TLBIs are
+         * already global.
+         */
+        if (mm)
+                mm_context_add_copro(mm);
         /*
          * Barrier is to make sure PE is visible in the SPA before it
          * is used by the device. It also helps with the global TLBI
@@ -543,7 +564,8 @@
          * have a reference on mm_users. Incrementing mm_count solves
          * the problem.
          */
-        mmgrab(mm);
+        if (mm)
+                mmgrab(mm);
         trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
 unlock:
         mutex_unlock(&spa->spa_lock);
@@ -553,7 +575,7 @@
 
 int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
 {
-        struct link *link = (struct link *) link_handle;
+        struct ocxl_link *link = (struct ocxl_link *) link_handle;
         struct spa *spa = link->spa;
         struct ocxl_process_element *pe;
         int pe_handle, rc;
@@ -589,7 +611,7 @@
 
 int ocxl_link_remove_pe(void *link_handle, int pasid)
 {
-        struct link *link = (struct link *) link_handle;
+        struct ocxl_link *link = (struct ocxl_link *) link_handle;
         struct spa *spa = link->spa;
         struct ocxl_process_element *pe;
         struct pe_data *pe_data;
@@ -649,8 +671,10 @@
         if (!pe_data) {
                 WARN(1, "Couldn't find pe data when removing PE\n");
         } else {
-                mm_context_remove_copro(pe_data->mm);
-                mmdrop(pe_data->mm);
+                if (pe_data->mm) {
+                        mm_context_remove_copro(pe_data->mm);
+                        mmdrop(pe_data->mm);
+                }
                 kfree_rcu(pe_data, rcu);
         }
 unlock:
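
The mm handling above follows one convention across xsl_fault_handler(), ocxl_link_add_pe() and ocxl_link_remove_pe(): a NULL mm marks a kernel context, so the copro registration with the nest MMU and the mm_count reference are only taken, and later dropped, for user contexts. A distilled sketch of that pairing, with hypothetical helper names rather than this driver's functions:

#include <linux/sched/mm.h>
#include <asm/mmu_context.h>

/* Sketch only: mm may be NULL for a kernel context, in which case there
 * is nothing to register or pin, since kernel TLB invalidations are
 * already global.
 */
static void example_attach_mm(struct mm_struct *mm)
{
        if (mm) {
                mm_context_add_copro(mm);       /* nest MMU must see TLBIs for this mm */
                mmgrab(mm);                     /* keep the mm_struct alive (mm_count) */
        }
}

static void example_detach_mm(struct mm_struct *mm)
{
        if (mm) {
                mm_context_remove_copro(mm);
                mmdrop(mm);
        }
}
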
@@ -659,32 +683,30 @@
 }
 EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
 
-int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
+int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
 {
-        struct link *link = (struct link *) link_handle;
-        int rc, irq;
-        u64 addr;
+        struct ocxl_link *link = (struct ocxl_link *) link_handle;
+        int irq;
 
         if (atomic_dec_if_positive(&link->irq_available) < 0)
                 return -ENOSPC;
 
-        rc = pnv_ocxl_alloc_xive_irq(&irq, &addr);
-        if (rc) {
+        irq = xive_native_alloc_irq();
+        if (!irq) {
                 atomic_inc(&link->irq_available);
-                return rc;
+                return -ENXIO;
         }
 
         *hw_irq = irq;
-        *trigger_addr = addr;
         return 0;
 }
 EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
 
 void ocxl_link_free_irq(void *link_handle, int hw_irq)
 {
-        struct link *link = (struct link *) link_handle;
+        struct ocxl_link *link = (struct ocxl_link *) link_handle;
 
-        pnv_ocxl_free_xive_irq(hw_irq);
+        xive_native_free_irq(hw_irq);
         atomic_inc(&link->irq_available);
 }
 EXPORT_SYMBOL_GPL(ocxl_link_free_irq);
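
With the last hunk, ocxl_link_irq_alloc() allocates the interrupt straight from XIVE and only hands back the hardware IRQ number; the trigger page address is no longer returned. A hedged sketch of a caller under the new signature, assuming the hardware IRQ is then mapped through the default (XIVE) IRQ domain, whose per-interrupt data carries the trigger page; the wrapper and its error codes are illustrative, not code from this driver:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <asm/xive.h>
#include <misc/ocxl.h>

/* Sketch only: allocate a hardware IRQ through the link, map it to a
 * Linux virq, then recover the trigger page from the XIVE irq data that
 * the mapping attached as handler data.
 */
static int example_alloc_afu_irq(void *link_handle, int *hw_irq,
                                 unsigned int *virq, u64 *trigger_addr)
{
        struct xive_irq_data *xd;
        int rc;

        rc = ocxl_link_irq_alloc(link_handle, hw_irq);
        if (rc)
                return rc;

        *virq = irq_create_mapping(NULL, *hw_irq);
        if (!*virq) {
                ocxl_link_free_irq(link_handle, *hw_irq);
                return -EINVAL;
        }

        xd = irq_get_handler_data(*virq);
        *trigger_addr = xd ? xd->trig_page : 0;
        return 0;
}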