2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -3,13 +3,14 @@
  * Synopsys DesignWare PCIe host controller driver
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
+ *		https://www.samsung.com
  *
  * Author: Jingoo Han <jg1.han@samsung.com>
  */
 
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/pci_regs.h>
@@ -19,30 +20,7 @@
 #include "pcie-designware.h"
 
 static struct pci_ops dw_pcie_ops;
-
-static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
-			       u32 *val)
-{
-	struct dw_pcie *pci;
-
-	if (pp->ops->rd_own_conf)
-		return pp->ops->rd_own_conf(pp, where, size, val);
-
-	pci = to_dw_pcie_from_pp(pp);
-	return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
-static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
-			       u32 val)
-{
-	struct dw_pcie *pci;
-
-	if (pp->ops->wr_own_conf)
-		return pp->ops->wr_own_conf(pp, where, size, val);
-
-	pci = to_dw_pcie_from_pp(pp);
-	return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops dw_child_pcie_ops;
 
 static void dw_msi_ack_irq(struct irq_data *d)
 {
@@ -81,13 +59,13 @@
 	unsigned long val;
 	u32 status, num_ctrls;
 	irqreturn_t ret = IRQ_NONE;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 
-	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+	num_ctrls = DIV_ROUND_UP(pp->num_vectors, MAX_MSI_IRQS_PER_CTRL);
 
 	for (i = 0; i < num_ctrls; i++) {
-		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
-					(i * MSI_REG_CTRL_BLOCK_SIZE),
-				    4, &status);
+		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+					   (i * MSI_REG_CTRL_BLOCK_SIZE));
 		if (!status)
 			continue;
 
@@ -106,6 +84,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dw_handle_msi_irq);
 
 /* Chained MSI interrupt service routine */
 static void dw_chained_msi_isr(struct irq_desc *desc)
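
The hunk above leaves the per-controller dispatch loop untouched; only the status read moves from dw_pcie_rd_own_conf() to dw_pcie_readl_dbi(). As a standalone sketch (illustrative only, not the driver's code) of how a set bit in a controller's status word becomes a hwirq number (the driver itself resolves that number via irq_find_mapping() and generic_handle_irq()):

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_MSI_IRQS_PER_CTRL	32

	/* Decode one controller's 32-bit status word into hwirq numbers. */
	static void dispatch_ctrl(unsigned int i, uint32_t status)
	{
		unsigned int pos;

		for (pos = 0; pos < MAX_MSI_IRQS_PER_CTRL; pos++) {
			if (!(status & (1u << pos)))
				continue;
			/* hwirq = controller base + bit position */
			printf("hwirq %u fired\n",
			       i * MAX_MSI_IRQS_PER_CTRL + pos);
		}
	}

	int main(void)
	{
		dispatch_ctrl(1, 0x00000005);	/* prints hwirq 32 and 34 */
		return 0;
	}
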
@@ -121,77 +100,63 @@
 	chained_irq_exit(chip, desc);
 }
 
-static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
+static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	u64 msi_target;
 
-	if (pp->ops->get_msi_addr)
-		msi_target = pp->ops->get_msi_addr(pp);
-	else
-		msi_target = (u64)pp->msi_data;
+	msi_target = (u64)pp->msi_data;
 
 	msg->address_lo = lower_32_bits(msi_target);
 	msg->address_hi = upper_32_bits(msi_target);
 
-	if (pp->ops->get_msi_data)
-		msg->data = pp->ops->get_msi_data(pp, data->hwirq);
-	else
-		msg->data = data->hwirq;
+	msg->data = d->hwirq;
 
 	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
-		(int)data->hwirq, msg->address_hi, msg->address_lo);
+		(int)d->hwirq, msg->address_hi, msg->address_lo);
 }
 
-static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
+static int dw_pci_msi_set_affinity(struct irq_data *d,
 				   const struct cpumask *mask, bool force)
 {
 	return -EINVAL;
 }
 
-static void dw_pci_bottom_mask(struct irq_data *data)
+static void dw_pci_bottom_mask(struct irq_data *d)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pp->lock, flags);
 
-	if (pp->ops->msi_clear_irq) {
-		pp->ops->msi_clear_irq(pp, data->hwirq);
-	} else {
-		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
-		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
-		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
-		pp->irq_status[ctrl] &= ~(1 << bit);
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
-				    ~pp->irq_status[ctrl]);
-	}
+	pp->irq_mask[ctrl] |= BIT(bit);
+	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
 
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
 
-static void dw_pci_bottom_unmask(struct irq_data *data)
+static void dw_pci_bottom_unmask(struct irq_data *d)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pp->lock, flags);
 
-	if (pp->ops->msi_set_irq) {
-		pp->ops->msi_set_irq(pp, data->hwirq);
-	} else {
-		ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
-		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
-		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
-		pp->irq_status[ctrl] |= 1 << bit;
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
-				    ~pp->irq_status[ctrl]);
-	}
+	pp->irq_mask[ctrl] &= ~BIT(bit);
+	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
 
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
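
The rewritten handlers above invert the caching scheme: pp->irq_mask[ctrl] now mirrors the PCIE_MSI_INTR0_MASK register directly (a set bit masks that vector), where the old code cached irq_status and wrote its complement. A standalone sketch of that invariant, with the register write modeled as a plain assignment (illustrative only):

	#include <stdint.h>
	#include <assert.h>

	#define BIT(n)	(1u << (n))

	static uint32_t irq_mask = ~0u;	/* all vectors masked, as after setup_rc */
	static uint32_t msi_intr0_mask;	/* stands in for the DBI register */

	static void mask_vector(unsigned int bit)
	{
		irq_mask |= BIT(bit);
		msi_intr0_mask = irq_mask;	/* dw_pcie_writel_dbi() in the driver */
	}

	static void unmask_vector(unsigned int bit)
	{
		irq_mask &= ~BIT(bit);
		msi_intr0_mask = irq_mask;
	}

	int main(void)
	{
		msi_intr0_mask = irq_mask;
		unmask_vector(3);
		assert(!(msi_intr0_mask & BIT(3)));	/* vector 3 can fire */
		mask_vector(3);
		assert(msi_intr0_mask & BIT(3));	/* vector 3 masked again */
		return 0;
	}
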
@@ -199,21 +164,14 @@
 static void dw_pci_bottom_ack(struct irq_data *d)
 {
 	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
-	unsigned long flags;
 
 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
-	raw_spin_lock_irqsave(&pp->lock, flags);
-
-	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
-
-	if (pp->ops->msi_irq_ack)
-		pp->ops->msi_irq_ack(d->hwirq, pp);
-
-	raw_spin_unlock_irqrestore(&pp->lock, flags);
+	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
 }
 
 static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -246,7 +204,7 @@
 
 	for (i = 0; i < nr_irqs; i++)
 		irq_domain_set_info(domain, virq + i, bit + i,
-				    &dw_pci_msi_bottom_irq_chip,
+				    pp->msi_irq_chip,
 				    pp, handle_edge_irq,
 				    NULL, NULL);
 
@@ -256,13 +214,13 @@
 static void dw_pcie_irq_domain_free(struct irq_domain *domain,
 				    unsigned int virq, unsigned int nr_irqs)
 {
-	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct pcie_port *pp = domain->host_data;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&pp->lock, flags);
 
-	bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
+	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
 			      order_base_2(nr_irqs));
 
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -277,10 +235,6 @@
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
-
-	/* Rely on the external MSI domain */
-	if (pp->msi_ext)
-		return 0;
 
 	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
 					       &dw_pcie_msi_domain_ops, pp);
@@ -305,42 +259,36 @@
 
 void dw_pcie_free_msi(struct pcie_port *pp)
 {
-	if (pp->msi_ext)
-		return;
-
-	irq_set_chained_handler(pp->msi_irq, NULL);
-	irq_set_handler_data(pp->msi_irq, NULL);
+	if (pp->msi_irq) {
+		irq_set_chained_handler(pp->msi_irq, NULL);
+		irq_set_handler_data(pp->msi_irq, NULL);
+	}
 
 	irq_domain_remove(pp->msi_domain);
 	irq_domain_remove(pp->irq_domain);
 
-	if (pp->msi_page)
-		__free_page(pp->msi_page);
+	if (pp->msi_data) {
+		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+		struct device *dev = pci->dev;
+
+		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
+				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	}
 }
 
 void dw_pcie_msi_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	struct device *dev = pci->dev;
-	u64 msi_target;
+	u64 msi_target = (u64)pp->msi_data;
 
-	pp->msi_page = alloc_page(GFP_KERNEL);
-	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
-				    DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, pp->msi_data)) {
-		dev_err(dev, "Failed to map MSI data\n");
-		__free_page(pp->msi_page);
-		pp->msi_page = NULL;
+	if (!IS_ENABLED(CONFIG_PCI_MSI))
 		return;
-	}
-	msi_target = (u64)pp->msi_data;
 
 	/* Program the msi_data */
-	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
-			    lower_32_bits(msi_target));
-	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
-			    upper_32_bits(msi_target));
+	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
+	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
 }
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
 
 int dw_pcie_host_init(struct pcie_port *pp)
 {
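
The MSI target is now the driver-owned pp->msi_msg field rather than a dedicated page: dw_pcie_host_init() (further down) maps it once with dma_map_single_attrs(), dw_pcie_msi_init() programs the resulting address, and dw_pcie_free_msi() above unmaps it. A hedged fragment of the map/unmap pairing, with dev and msg standing in for pci->dev and pp->msi_msg (kernel-context sketch, not literal driver code):

	dma_addr_t msi_data;

	/* DMA_ATTR_SKIP_CPU_SYNC: the CPU never reads the data; the
	 * mapped address only serves as a write target for MSIs. */
	msi_data = dma_map_single_attrs(dev, &msg, sizeof(msg),
					DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, msi_data))
		return -ENOMEM;	/* nothing mapped, so nothing to unmap */

	/* ... program lower/upper 32 bits into PCIE_MSI_ADDR_LO/HI ... */

	dma_unmap_single_attrs(dev, msi_data, sizeof(msg),
			       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
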
@@ -348,8 +296,7 @@
 	struct device *dev = pci->dev;
 	struct device_node *np = dev->of_node;
 	struct platform_device *pdev = to_platform_device(dev);
-	struct resource_entry *win, *tmp;
-	struct pci_bus *bus, *child;
+	struct resource_entry *win;
 	struct pci_host_bridge *bridge;
 	struct resource *cfg_res;
 	int ret;
@@ -358,10 +305,8 @@
 
 	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
 	if (cfg_res) {
-		pp->cfg0_size = resource_size(cfg_res) >> 1;
-		pp->cfg1_size = resource_size(cfg_res) >> 1;
+		pp->cfg0_size = resource_size(cfg_res);
 		pp->cfg0_base = cfg_res->start;
-		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
 	} else if (!pp->va_cfg0_base) {
 		dev_err(dev, "Missing *config* reg space\n");
 	}
@@ -371,78 +316,37 @@
 		return -ENOMEM;
 
 	pp->bridge = bridge;
-	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
-					&bridge->windows, &pp->io_base);
-	if (ret)
-		return ret;
-
-	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
-	if (ret)
-		return ret;
 
 	/* Get the I/O and memory ranges from DT */
-	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
+	resource_list_for_each_entry(win, &bridge->windows) {
 		switch (resource_type(win->res)) {
 		case IORESOURCE_IO:
-			ret = devm_pci_remap_iospace(dev, win->res,
-						     pp->io_base);
-			if (ret) {
-				dev_warn(dev, "Error %d: failed to map resource %pR\n",
-					 ret, win->res);
-				resource_list_destroy_entry(win);
-			} else {
-				pp->io = win->res;
-				pp->io->name = "I/O";
-				pp->io_size = resource_size(pp->io);
-				pp->io_bus_addr = pp->io->start - win->offset;
-			}
-			break;
-		case IORESOURCE_MEM:
-			pp->mem = win->res;
-			pp->mem->name = "MEM";
-			pp->mem_size = resource_size(pp->mem);
-			pp->mem_bus_addr = pp->mem->start - win->offset;
+			pp->io_size = resource_size(win->res);
+			pp->io_bus_addr = win->res->start - win->offset;
+			pp->io_base = pci_pio_to_address(win->res->start);
 			break;
 		case 0:
-			pp->cfg = win->res;
-			pp->cfg0_size = resource_size(pp->cfg) >> 1;
-			pp->cfg1_size = resource_size(pp->cfg) >> 1;
-			pp->cfg0_base = pp->cfg->start;
-			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
-			break;
-		case IORESOURCE_BUS:
-			pp->busn = win->res;
+			dev_err(dev, "Missing *config* reg space\n");
+			pp->cfg0_size = resource_size(win->res);
+			pp->cfg0_base = win->res->start;
+			if (!pci->dbi_base) {
+				pci->dbi_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg0_base,
+						pp->cfg0_size);
+				if (!pci->dbi_base) {
+					dev_err(dev, "Error with ioremap\n");
+					return -ENOMEM;
+				}
+			}
 			break;
 		}
 	}
-
-	if (!pci->dbi_base) {
-		pci->dbi_base = devm_pci_remap_cfgspace(dev,
-						pp->cfg->start,
-						resource_size(pp->cfg));
-		if (!pci->dbi_base) {
-			dev_err(dev, "Error with ioremap\n");
-			return -ENOMEM;
-		}
-	}
-
-	pp->mem_base = pp->mem->start;
 
 	if (!pp->va_cfg0_base) {
 		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
 					pp->cfg0_base, pp->cfg0_size);
 		if (!pp->va_cfg0_base) {
 			dev_err(dev, "Error with ioremap in function\n");
-			return -ENOMEM;
-		}
-	}
-
-	if (!pp->va_cfg1_base) {
-		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
-						pp->cfg1_base,
-						pp->cfg1_size);
-		if (!pp->va_cfg1_base) {
-			dev_err(dev, "Error with ioremap\n");
 			return -ENOMEM;
 		}
 	}
@@ -454,8 +358,7 @@
 	if (pci->link_gen < 1)
 		pci->link_gen = of_pci_get_max_link_speed(np);
 
-	if (pci_msi_enabled() &&
-	    !pp->msi_ext) {
+	if (pci_msi_enabled()) {
 		/*
 		 * If a specific SoC driver needs to change the
 		 * default number of vectors, it needs to implement
@@ -475,6 +378,8 @@
 		}
 
 		if (!pp->ops->msi_host_init) {
+			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
 			ret = dw_pcie_allocate_domains(pp);
 			if (ret)
 				return ret;
@@ -483,6 +388,17 @@
 			irq_set_chained_handler_and_data(pp->msi_irq,
 							 dw_chained_msi_isr,
 							 pp);
+
+			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
+						      sizeof(pp->msi_msg),
+						      DMA_FROM_DEVICE,
+						      DMA_ATTR_SKIP_CPU_SYNC);
+			ret = dma_mapping_error(pci->dev, pp->msi_data);
+			if (ret) {
+				dev_err(pci->dev, "Failed to map MSI data\n");
+				pp->msi_data = 0;
+				goto err_free_msi;
+			}
 		} else {
 			ret = pp->ops->msi_host_init(pp);
 			if (ret < 0)
@@ -490,206 +406,153 @@
 		}
 	}
 
+	/* Set default bus ops */
+	bridge->ops = &dw_pcie_ops;
+	bridge->child_ops = &dw_child_pcie_ops;
+
 	if (pp->ops->host_init) {
 		ret = pp->ops->host_init(pp);
 		if (ret)
 			goto err_free_msi;
 	}
 
-	pp->root_bus_nr = pp->busn->start;
-
-	bridge->dev.parent = dev;
 	bridge->sysdata = pp;
-	bridge->busnr = pp->root_bus_nr;
-	bridge->ops = &dw_pcie_ops;
-	bridge->map_irq = of_irq_parse_and_map_pci;
-	bridge->swizzle_irq = pci_common_swizzle;
 
-	ret = pci_scan_root_bus_bridge(bridge);
-	if (ret)
-		goto err_free_msi;
-
-	bus = bridge->bus;
-
-	if (pp->ops->scan_bus)
-		pp->ops->scan_bus(pp);
-
-	pci_bus_size_bridges(bus);
-	pci_bus_assign_resources(bus);
-
-	list_for_each_entry(child, &bus->children, node)
-		pcie_bus_configure_settings(child);
-
-	pci_bus_add_devices(bus);
-	return 0;
+	ret = pci_host_probe(bridge);
+	if (!ret)
+		return 0;
 
 err_free_msi:
 	if (pci_msi_enabled() && !pp->ops->msi_host_init)
 		dw_pcie_free_msi(pp);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
 
-static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
-				 u32 devfn, int where, int size, u32 *val)
+void dw_pcie_host_deinit(struct pcie_port *pp)
 {
-	int ret, type;
-	u32 busdev, cfg_size;
-	u64 cpu_addr;
-	void __iomem *va_cfg_base;
+	pci_stop_root_bus(pp->bridge->bus);
+	pci_remove_root_bus(pp->bridge->bus);
+	if (pci_msi_enabled() && !pp->ops->msi_host_init)
+		dw_pcie_free_msi(pp);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
+
+static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
+						unsigned int devfn, int where)
+{
+	int type;
+	u32 busdev;
+	struct pcie_port *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 
-	if (pp->ops->rd_other_conf)
-		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
+	/*
+	 * Checking whether the link is up here is a last line of defense
+	 * against platforms that forward errors on the system bus as
+	 * SError upon PCI configuration transactions issued when the link
+	 * is down. This check is racy by definition and does not stop
+	 * the system from triggering an SError if the link goes down
+	 * after this check is performed.
+	 */
+	if (!dw_pcie_link_up(pci))
+		return NULL;
 
 	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
 		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
 
-	if (bus->parent->number == pp->root_bus_nr) {
+	if (pci_is_root_bus(bus->parent))
 		type = PCIE_ATU_TYPE_CFG0;
-		cpu_addr = pp->cfg0_base;
-		cfg_size = pp->cfg0_size;
-		va_cfg_base = pp->va_cfg0_base;
-	} else {
+	else
 		type = PCIE_ATU_TYPE_CFG1;
-		cpu_addr = pp->cfg1_base;
-		cfg_size = pp->cfg1_size;
-		va_cfg_base = pp->va_cfg1_base;
-	}
 
-	dw_pcie_prog_outbound_atu(pci, 0,
-				  type, cpu_addr,
-				  busdev, cfg_size);
-	ret = dw_pcie_read(va_cfg_base + where, size, val);
-	if (!ret && pci->io_cfg_atu_shared)
-		dw_pcie_prog_outbound_atu(pci, 0,
-					  PCIE_ATU_TYPE_IO, pp->io_base,
-					  pp->io_bus_addr, pp->io_size);
 
-	return ret;
+	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
+
+	return pp->va_cfg0_base + where;
 }
 
-static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
-				 u32 devfn, int where, int size, u32 val)
+static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 *val)
 {
-	int ret, type;
-	u32 busdev, cfg_size;
-	u64 cpu_addr;
-	void __iomem *va_cfg_base;
+	int ret;
+	struct pcie_port *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 
-	if (pp->ops->wr_other_conf)
-		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
+	ret = pci_generic_config_read(bus, devfn, where, size, val);
 
-	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
-		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
-
-	if (bus->parent->number == pp->root_bus_nr) {
-		type = PCIE_ATU_TYPE_CFG0;
-		cpu_addr = pp->cfg0_base;
-		cfg_size = pp->cfg0_size;
-		va_cfg_base = pp->va_cfg0_base;
-	} else {
-		type = PCIE_ATU_TYPE_CFG1;
-		cpu_addr = pp->cfg1_base;
-		cfg_size = pp->cfg1_size;
-		va_cfg_base = pp->va_cfg1_base;
-	}
-
-	dw_pcie_prog_outbound_atu(pci, 0,
-				  type, cpu_addr,
-				  busdev, cfg_size);
-	ret = dw_pcie_write(va_cfg_base + where, size, val);
-	if (!ret && pci->io_cfg_atu_shared)
+	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
 		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
 					  pp->io_bus_addr, pp->io_size);
 
 	return ret;
 }
 
-static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
-				int dev)
+static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 val)
 {
+	int ret;
+	struct pcie_port *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 
-	/* If there is no link, then there is no device */
-	if (bus->number != pp->root_bus_nr) {
-		if (!dw_pcie_link_up(pci))
-			return 0;
-	}
+	ret = pci_generic_config_write(bus, devfn, where, size, val);
 
-	/* Access only one slot on each root port */
-	if (bus->number == pp->root_bus_nr && dev > 0)
-		return 0;
+	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
+		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
+					  pp->io_bus_addr, pp->io_size);
 
-	return 1;
+	return ret;
 }
 
-static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
-			   int size, u32 *val)
-{
-	struct pcie_port *pp = bus->sysdata;
-
-	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
-		*val = 0xffffffff;
-		return PCIBIOS_DEVICE_NOT_FOUND;
-	}
-
-	if (bus->number == pp->root_bus_nr)
-		return dw_pcie_rd_own_conf(pp, where, size, val);
-
-	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
-}
-
-static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
-			   int where, int size, u32 val)
-{
-	struct pcie_port *pp = bus->sysdata;
-
-	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	if (bus->number == pp->root_bus_nr)
-		return dw_pcie_wr_own_conf(pp, where, size, val);
-
-	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
-}
-
-static struct pci_ops dw_pcie_ops = {
-	.read = dw_pcie_rd_conf,
-	.write = dw_pcie_wr_conf,
+static struct pci_ops dw_child_pcie_ops = {
+	.map_bus = dw_pcie_other_conf_map_bus,
+	.read = dw_pcie_rd_other_conf,
+	.write = dw_pcie_wr_other_conf,
 };
 
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
 {
-	u32 val;
+	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 
-	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
-	if (val == 0xffffffff)
-		return 1;
+	if (PCI_SLOT(devfn) > 0)
+		return NULL;
 
-	return 0;
+	return pci->dbi_base + where;
 }
+EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+
+static struct pci_ops dw_pcie_ops = {
+	.map_bus = dw_pcie_own_conf_map_bus,
+	.read = pci_generic_config_read,
+	.write = pci_generic_config_write,
+};
 
 void dw_pcie_setup_rc(struct pcie_port *pp)
 {
 	u32 val, ctrl, num_ctrls;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	int atu_idx = 0;
-	struct resource_entry *entry, *tmp;
+
+	/*
+	 * Enable DBI read-only registers for writing/updating configuration.
+	 * Write permission gets disabled towards the end of this function.
+	 */
+	dw_pcie_dbi_ro_wr_en(pci);
 
 	dw_pcie_setup(pci);
 
-	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+	if (pci_msi_enabled() && !pp->ops->msi_host_init) {
+		num_ctrls = DIV_ROUND_UP(pp->num_vectors, MAX_MSI_IRQS_PER_CTRL);
 
-	/* Initialize IRQ Status array */
-	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
-					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
-				    4, ~0);
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
-					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
-				    4, ~0);
-		pp->irq_status[ctrl] = 0;
+		/* Initialize IRQ Status array */
+		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+			pp->irq_mask[ctrl] = ~0;
+			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+					    pp->irq_mask[ctrl]);
+			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+					    ~0);
+		}
 	}
 
 	/* Setup RC BARs */
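
The accessors in the hunk above hand the actual MMIO to the core: pci_ops::map_bus() returns a virtual address for (bus, devfn, where), or NULL to fail the access, and pci_generic_config_read()/write() do the read or write (filling in all-ones data and returning PCIBIOS_DEVICE_NOT_FOUND when map_bus() returned NULL). A hedged sketch of the same pattern for a hypothetical controller with a flat ECAM-style window (all demo_* names are illustrative, not from this patch):

	struct demo_pcie {			/* hypothetical driver state */
		void __iomem	*va_base;	/* flat config window */
		bool		link_up;
	};

	static void __iomem *demo_map_bus(struct pci_bus *bus,
					  unsigned int devfn, int where)
	{
		struct demo_pcie *p = bus->sysdata;

		if (!p->link_up)
			return NULL;	/* core fails the access with all-ones */

		/* 4 KiB of config space per devfn, ECAM-style */
		return p->va_base + (devfn << 12) + where;
	}

	static struct pci_ops demo_pci_ops = {
		.map_bus = demo_map_bus,
		.read	 = pci_generic_config_read,
		.write	 = pci_generic_config_write,
	};
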
@@ -697,12 +560,10 @@
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
 
 	/* Setup interrupt pins */
-	dw_pcie_dbi_ro_wr_en(pci);
 	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
 	val &= 0xffff00ff;
 	val |= 0x00000100;
 	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
-	dw_pcie_dbi_ro_wr_dis(pci);
 
 	/* Setup bus numbers */
 	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
718579 dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
719580
720581 /*
721
- * If the platform provides ->rd_other_conf, it means the platform
722
- * uses its own address translation component rather than ATU, so
723
- * we should not program the ATU here.
582
+ * If the platform provides its own child bus config accesses, it means
583
+ * the platform uses its own address translation component rather than
584
+ * ATU, so we should not program the ATU here.
724585 */
725
- if (!pp->ops->rd_other_conf) {
726
- /* Get iATU unroll support */
727
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
728
- dev_dbg(pci->dev, "iATU unroll: %s\n",
729
- pci->iatu_unroll_enabled ? "enabled" : "disabled");
730
- }
586
+ if (pp->bridge->child_ops == &dw_child_pcie_ops) {
587
+ int atu_idx = 0;
588
+ struct resource_entry *entry;
731589
732
- /* Get last memory resource entry */
733
- resource_list_for_each_entry_safe(entry, tmp, &pp->bridge->windows) {
734
- if (resource_type(entry->res) != IORESOURCE_MEM)
735
- continue;
590
+ /* Get last memory resource entry */
591
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
592
+ if (resource_type(entry->res) != IORESOURCE_MEM)
593
+ continue;
736594
737
- if (pci->num_viewport <= ++atu_idx)
738
- break;
595
+ if (pci->num_viewport <= ++atu_idx)
596
+ break;
739597
740
- dw_pcie_prog_outbound_atu(pci, atu_idx,
741
- PCIE_ATU_TYPE_MEM, entry->res->start,
742
- entry->res->start - entry->offset,
743
- resource_size(entry->res));
744
- }
745
-
746
- if (pp->io_size) {
747
- if (pci->num_viewport > ++atu_idx)
748598 dw_pcie_prog_outbound_atu(pci, atu_idx,
749
- PCIE_ATU_TYPE_IO, pp->io_base,
750
- pp->io_bus_addr, pp->io_size);
751
- else
752
- pci->io_cfg_atu_shared = true;
599
+ PCIE_ATU_TYPE_MEM, entry->res->start,
600
+ entry->res->start - entry->offset,
601
+ resource_size(entry->res));
602
+ }
603
+
604
+ if (pp->io_size) {
605
+ if (pci->num_viewport > ++atu_idx)
606
+ dw_pcie_prog_outbound_atu(pci, atu_idx,
607
+ PCIE_ATU_TYPE_IO, pp->io_base,
608
+ pp->io_bus_addr, pp->io_size);
609
+ else
610
+ pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
611
+ }
612
+
613
+ if (pci->num_viewport <= atu_idx)
614
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
615
+ pci->num_viewport);
753616 }
754617
755
- if (pci->num_viewport <= atu_idx)
756
- dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
757
- pci->num_viewport);
618
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
758619
759
- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
760
-
761
- /* Enable write permission for the DBI read-only register */
762
- dw_pcie_dbi_ro_wr_en(pci);
763620 /* Program correct class for RC */
764
- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
765
- /* Better disable write permission right after the update */
766
- dw_pcie_dbi_ro_wr_dis(pci);
621
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
767622
768
- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
623
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
769624 val |= PORT_LOGIC_SPEED_CHANGE;
770
- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
625
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
626
+
627
+ dw_pcie_dbi_ro_wr_dis(pci);
771628 }
629
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
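
With dw_pcie_host_init(), dw_pcie_host_deinit(), dw_pcie_msi_init() and dw_pcie_setup_rc() now exported, glue drivers can be built as modules. A hedged sketch of how a platform driver might sit on top of these entry points after this refactor (the demo_* names are hypothetical, not from this patch):

	static int demo_pcie_host_init(struct pcie_port *pp)
	{
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

		dw_pcie_setup_rc(pp);	/* program the RC, exported above */
		dw_pcie_msi_init(pp);	/* program the MSI target address */

		return dw_pcie_wait_for_link(pci);
	}

	static const struct dw_pcie_host_ops demo_host_ops = {
		.host_init = demo_pcie_host_init,
	};

	/* In probe: pp->ops = &demo_host_ops; then return dw_pcie_host_init(pp); */
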