@@ -274,6 +274,7 @@
 /* A SDQCR mask comprising all the available/visible pool channels */
 static u32 qm_pools_sdqcr;
 static int __qman_probed;
+static int __qman_requires_cleanup;
 
 static inline u32 qm_ccsr_in(u32 offset)
 {
@@ -340,19 +341,55 @@
 }
 
 #define PFDR_AR_EN BIT(31)
-static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
 {
+	void *ptr;
 	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
 	u32 exp = ilog2(size);
+	u32 bar, bare;
 
 	/* choke if size isn't within range */
 	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
 		    is_power_of_2(size));
 	/* choke if 'ba' has lower-alignment than 'size' */
 	DPAA_ASSERT(!(ba & (size - 1)));
+
+	/* Check to see if QMan has already been initialized */
+	bar = qm_ccsr_in(offset + REG_offset_BAR);
+	if (bar) {
+		/* Make sure ba == what was programmed */
+		bare = qm_ccsr_in(offset);
+		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+			pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+			       ba, bare, bar);
+			return -ENOMEM;
+		}
+		__qman_requires_cleanup = 1;
+		/* Return 1 to indicate memory was previously programmed */
+		return 1;
+	}
+	/* Need to temporarily map the area to make sure it is zeroed */
+	ptr = memremap(ba, size, MEMREMAP_WB);
+	if (!ptr) {
+		pr_crit("memremap() of QMan private memory failed\n");
+		return -ENOMEM;
+	}
+	memset(ptr, 0, size);
+
+#ifdef CONFIG_PPC
+	/*
+	 * PPC doesn't appear to flush the cache on memunmap() but the
+	 * cache must be flushed since QMan does non-coherent accesses
+	 * to this memory
+	 */
+	flush_dcache_range((unsigned long) ptr, (unsigned long) ptr + size);
+#endif
+	memunmap(ptr);
+
 	qm_ccsr_out(offset, upper_32_bits(ba));
 	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
 	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+	return 0;
 }
 
 static void qm_set_pfdr_threshold(u32 th, u8 k)
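Reviewer note: qm_set_memory() changes from void to a tri-state int, and callers must now tell the three outcomes apart. A minimal caller sketch under that reading — the function below is hypothetical; only qm_set_memory(), qm_memory_fqd, fqd_a and fqd_sz come from this patch, and the qm_init_pfdr() gating further down follows the same pattern:

/*
 * Hypothetical caller illustrating the new return contract:
 *   < 0 -> hard failure (memremap() error or a BAR mismatch)
 *     0 -> memory was freshly zeroed and programmed
 *     1 -> BARs were already programmed (e.g. by a previous kernel),
 *          so per-structure re-initialization must be skipped
 */
static int example_fqd_setup(void)
{
	int err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);

	if (err < 0)
		return err;	/* propagate the failure */
	if (err == 1)
		return 0;	/* already live: skip seeding, clean up later */
	/* err == 0: fresh, zeroed memory; safe to seed hardware structures */
	return 0;
}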
@@ -419,7 +456,7 @@
 static int zero_priv_mem(phys_addr_t addr, size_t sz)
 {
 	/* map as cacheable, non-guarded */
-	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+	void __iomem *tmpp = ioremap_cache(addr, sz);
 
 	if (!tmpp)
 		return -ENOMEM;
@@ -455,7 +492,7 @@
 
 #endif
 
-static unsigned int qm_get_fqid_maxcnt(void)
+unsigned int qm_get_fqid_maxcnt(void)
 {
 	return fqd_sz / 64;
 }
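Side note: qm_get_fqid_maxcnt() loses its static qualifier, presumably so recovery code elsewhere in the driver can bound a walk over every frame queue (each FQ descriptor is 64 bytes, hence fqd_sz / 64). A sketch of such a consumer; the loop itself is our assumption, though qman_shutdown_fq() does exist in qman.c:

/* Hypothetical recovery walk, draining any FQ a previous kernel left active */
static void example_drain_stale_fqs(void)
{
	u32 fqid;

	for (fqid = 0; fqid < qm_get_fqid_maxcnt(); fqid++)
		qman_shutdown_fq(fqid);
}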
@@ -571,12 +608,19 @@
 	int i, err;
 
 	/* FQD memory */
-	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
-	/* PFDR memory */
-	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
-	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
-	if (err)
+	err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+	if (err < 0)
 		return err;
+	/* PFDR memory */
+	err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+	if (err < 0)
+		return err;
+	/* Only initialize PFDRs if the QMan was not initialized before */
+	if (err == 0) {
+		err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+		if (err)
+			return err;
+	}
 	/* thresholds */
 	qm_set_pfdr_threshold(512, 64);
 	qm_set_sfdr_threshold(128);
@@ -596,7 +640,7 @@
 }
 
 #define LIO_CFG_LIODN_MASK 0x0fff0000
-void qman_liodn_fixup(u16 channel)
+void __qman_liodn_fixup(u16 channel)
 {
 	static int done;
 	static u32 liodn_offset;
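The added double-underscore prefix suggests the public qman_liodn_fixup name is now resolved in qman_priv.h rather than here, likely so the fixup compiles away when PAMU is not configured. A sketch of the header-side counterpart we assume this pairs with (not shown in this diff):

/* assumed qman_priv.h counterpart, guarded on PAMU support */
#ifdef CONFIG_FSL_PAMU
void __qman_liodn_fixup(u16 channel);
#define qman_liodn_fixup __qman_liodn_fixup
#else
static inline void qman_liodn_fixup(u16 channel) { }
#endif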
@@ -693,6 +737,17 @@
 }
 EXPORT_SYMBOL_GPL(qman_is_probed);
 
+int qman_requires_cleanup(void)
+{
+	return __qman_requires_cleanup;
+}
+
+void qman_done_cleanup(void)
+{
+	qman_enable_irqs();
+	__qman_requires_cleanup = 0;
+}
+
 static int fsl_qman_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
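Usage note: nothing in this file consumes the new pair yet. A hypothetical consumer on the portal bring-up path might look as follows; only qman_requires_cleanup() and qman_done_cleanup() are real names from this patch (qman_enable_irqs() is called inside the latter), and the drain step is a placeholder:

/* Hypothetical recovery hook, run once before portals go live */
static void example_recover_if_needed(void)
{
	if (!qman_requires_cleanup())
		return;
	/* ... drain stale FQs/portals left over by the previous kernel ... */
	qman_done_cleanup();	/* re-enables IRQs and clears the flag */
}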
---|