// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

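/*
 * Top-level MSI irq_chip callbacks: these back dw_pcie_msi_irq_chip, the
 * chip seen by endpoint drivers.  Mask/unmask are applied both at the
 * device's MSI capability (pci_msi_*_irq) and at the controller itself
 * through the parent chip of the hierarchical IRQ domain.
 */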
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
	.flags = IRQCHIP_PIPELINE_SAFE,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

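/*
 * The DWC MSI controller groups vectors into banks of MAX_MSI_IRQS_PER_CTRL
 * (32) vectors, each bank with its own ENABLE/MASK/STATUS registers spaced
 * MSI_REG_CTRL_BLOCK_SIZE apart.  The handler below walks every bank's
 * status register and dispatches each pending vector through the MSI IRQ
 * domain.
 */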
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = DIV_ROUND_UP(pp->num_vectors, MAX_MSI_IRQS_PER_CTRL);

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dw_handle_msi_irq);

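/*
 * Used when the controller's MSI output is wired as a single interrupt to
 * the parent interrupt controller (pp->msi_irq): the chained handler only
 * demultiplexes by calling dw_handle_msi_irq() between chained_irq_enter()
 * and chained_irq_exit().
 */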
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

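/*
 * All MSI vectors funnel through the controller's single interrupt output,
 * so per-vector CPU affinity cannot be honoured; reject the request.
 */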
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

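/*
 * "Bottom" chip callbacks: these program the controller's MSI registers
 * directly.  A hwirq decomposes into a bank index (hwirq / 32) and a bit
 * within that bank (hwirq % 32); pp->irq_mask[] caches the mask register
 * contents and pp->lock serializes the read-modify-write updates.
 */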
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

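/*
 * Multi-MSI requires a contiguous, naturally aligned block of vectors, so
 * hwirqs are handed out with bitmap_find_free_region() using
 * order_base_2(nr_irqs) as the allocation order.
 */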
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

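/*
 * Two-level domain setup: a linear "native" domain owns the hwirq
 * bookkeeping and the bottom chip, and a PCI/MSI domain created with
 * pci_msi_create_irq_domain() is stacked on top of it for endpoint drivers.
 */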
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_data) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct device *dev = pci->dev;

		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	}
}

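/*
 * pp->msi_data is the DMA address of pp->msi_msg.  It is used purely as a
 * magic target address: endpoints write their vector number to it and the
 * controller latches the write into the MSI status registers, so the
 * backing memory is never actually read (hence DMA_ATTR_SKIP_CPU_SYNC on
 * the mapping).
 */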
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);

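/*
 * Illustrative sketch of how an SoC glue driver typically drives this API
 * (the foo_* names are hypothetical, not part of this file):
 *
 *	static const struct dw_pcie_host_ops foo_pcie_host_ops = {
 *		.host_init = foo_pcie_host_init,
 *	};
 *
 *	static int foo_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct dw_pcie *pci = ...;	// allocated by the glue driver
 *
 *		pci->pp.ops = &foo_pcie_host_ops;
 *		pci->pp.msi_irq = platform_get_irq_byname(pdev, "msi");
 *		return dw_pcie_host_init(&pci->pp);
 *	}
 *
 * dw_pcie_host_init() then maps the "config" window, parses the bridge
 * windows, sets up MSI, and registers the host bridge via pci_host_probe().
 */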
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res);
		pp->cfg0_base = cfg_res->start;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io_size = resource_size(win->res);
			pp->io_bus_addr = win->res->start - win->offset;
			pp->io_base = pci_pio_to_address(win->res->start);
			break;
		case 0:
			dev_err(dev, "Missing *config* reg space\n");
			pp->cfg0_size = resource_size(win->res);
			pp->cfg0_base = win->res->start;
			if (!pci->dbi_base) {
				pci->dbi_base = devm_pci_remap_cfgspace(dev,
							pp->cfg0_base,
							pp->cfg0_size);
				if (!pci->dbi_base) {
					dev_err(dev, "Error with ioremap\n");
					return -ENOMEM;
				}
			}
			break;
		}
	}

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);

			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
						      sizeof(pp->msi_msg),
						      DMA_FROM_DEVICE,
						      DMA_ATTR_SKIP_CPU_SYNC);
			ret = dma_mapping_error(pci->dev, pp->msi_data);
			if (ret) {
				dev_err(pci->dev, "Failed to map MSI data\n");
				pp->msi_data = 0;
				goto err_free_msi;
			}
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (!ret)
		return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

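/*
 * Config accesses to devices below the root port all share the single
 * "config" window: map_bus() retargets outbound ATU index 0 at the
 * requested bus/devfn (CFG0 for the bus immediately below the root port,
 * CFG1 beyond it) and returns the corresponding offset inside the window.
 */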
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	int type;
	u32 busdev;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);

	return pp->va_cfg0_base + where;
}

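/*
 * When there are not enough ATU viewports, the I/O window shares a viewport
 * with config accesses (DWC_IATU_IOCFG_SHARED, set in dw_pcie_setup_rc()).
 * In that case the viewport must be pointed back at I/O space after every
 * config read or write.
 */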
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

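/*
 * Root bus ("own") config accesses go straight to the DBI register space;
 * only device 0 exists on the root bus, so any other slot returns NULL.
 */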
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

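/*
 * One-time Root Complex programming, all done with DBI read-only write
 * access temporarily enabled: MSI bank enable/mask init, RC BARs, interrupt
 * pin, bus numbers, command register, outbound ATU windows for MEM/IO (when
 * the default child ops are in use) and the bridge class code.
 */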
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pci_msi_enabled() && !pp->ops->msi_host_init) {
		num_ctrls = DIV_ROUND_UP(pp->num_vectors, MAX_MSI_IRQS_PER_CTRL);

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		int atu_idx = 0;
		struct resource_entry *entry;

		/* Get last memory resource entry */
		resource_list_for_each_entry(entry, &pp->bridge->windows) {
			if (resource_type(entry->res) != IORESOURCE_MEM)
				continue;

			if (pci->num_viewport <= ++atu_idx)
				break;

			dw_pcie_prog_outbound_atu(pci, atu_idx,
						  PCIE_ATU_TYPE_MEM, entry->res->start,
						  entry->res->start - entry->offset,
						  resource_size(entry->res));
		}

		if (pp->io_size) {
			if (pci->num_viewport > ++atu_idx)
				dw_pcie_prog_outbound_atu(pci, atu_idx,
							  PCIE_ATU_TYPE_IO, pp->io_base,
							  pp->io_bus_addr, pp->io_size);
			else
				pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
		}

		if (pci->num_viewport <= atu_idx)
			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
				 pci->num_viewport);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);