2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/pci/controller/dwc/pci-keystone.c
@@ -3,47 +3,541 @@
  * PCIe host controller driver for Texas Instruments Keystone SoCs
  *
  * Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
+ * https://www.ti.com
  *
  * Author: Murali Karicheri <m-karicheri2@ti.com>
  * Implementation based on pci-exynos.c and pcie-designware.c
  */

-#include <linux/irqchip/chained_irq.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
+#include <linux/gpio/consumer.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
 #include <linux/msi.h>
-#include <linux/of_irq.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/of_pci.h>
-#include <linux/platform_device.h>
 #include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/resource.h>
 #include <linux/signal.h>

+#include "../../pci.h"
 #include "pcie-designware.h"
-#include "pci-keystone.h"

-#define DRIVER_NAME "keystone-pcie"
+#define PCIE_VENDORID_MASK              0xffff
+#define PCIE_DEVICEID_SHIFT             16

-/* DEV_STAT_CTRL */
-#define PCIE_CAP_BASE           0x70
+/* Application registers */
+#define CMD_STATUS                      0x004
+#define LTSSM_EN_VAL                    BIT(0)
+#define OB_XLAT_EN_VAL                  BIT(1)
+#define DBI_CS2                         BIT(5)
+
+#define CFG_SETUP                       0x008
+#define CFG_BUS(x)                      (((x) & 0xff) << 16)
+#define CFG_DEVICE(x)                   (((x) & 0x1f) << 8)
+#define CFG_FUNC(x)                     ((x) & 0x7)
+#define CFG_TYPE1                       BIT(24)
+
+#define OB_SIZE                         0x030
+#define OB_OFFSET_INDEX(n)              (0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n)                 (0x204 + (8 * (n)))
+#define OB_ENABLEN                      BIT(0)
+#define OB_WIN_SIZE                     8       /* 8MB */
+
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n)   (0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)   (0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET                 0x64
+#define PCIE_EP_IRQ_CLR                 0x68
+#define INT_ENABLE                      BIT(0)
+
+/* IRQ register defines */
+#define IRQ_EOI                         0x050
+
+#define MSI_IRQ                         0x054
+#define MSI_IRQ_STATUS(n)               (0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n)           (0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n)           (0x10c + ((n) << 4))
+#define MSI_IRQ_OFFSET                  4
+
+#define IRQ_STATUS(n)                   (0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n)               (0x188 + ((n) << 4))
+#define INTx_EN                         BIT(0)
+
+#define ERR_IRQ_STATUS                  0x1c4
+#define ERR_IRQ_ENABLE_SET              0x1c8
+#define ERR_AER                         BIT(5)  /* ECRC error */
+#define AM6_ERR_AER                     BIT(4)  /* AM6 ECRC error */
+#define ERR_AXI                         BIT(4)  /* AXI tag lookup fatal error */
+#define ERR_CORR                        BIT(3)  /* Correctable error */
+#define ERR_NONFATAL                    BIT(2)  /* Non-fatal error */
+#define ERR_FATAL                       BIT(1)  /* Fatal error */
+#define ERR_SYS                         BIT(0)  /* System error */
+#define ERR_IRQ_ALL                     (ERR_AER | ERR_AXI | ERR_CORR | \
+                                         ERR_NONFATAL | ERR_FATAL | ERR_SYS)

 /* PCIE controller device IDs */
-#define PCIE_RC_K2HK            0xb008
-#define PCIE_RC_K2E             0xb009
-#define PCIE_RC_K2L             0xb00a
-#define PCIE_RC_K2G             0xb00b
+#define PCIE_RC_K2HK                    0xb008
+#define PCIE_RC_K2E                     0xb009
+#define PCIE_RC_K2L                     0xb00a
+#define PCIE_RC_K2G                     0xb00b

-#define to_keystone_pcie(x)     dev_get_drvdata((x)->dev)
+#define KS_PCIE_DEV_TYPE_MASK           (0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode)          ((mode) << 1)

-static void quirk_limit_mrrs(struct pci_dev *dev)
+#define EP                              0x0
+#define LEG_EP                          0x1
+#define RC                              0x2
+
+#define KS_PCIE_SYSCLOCKOUTEN           BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK        0x3
+#define AM654_WIN_SIZE                  SZ_64K
+
+#define APP_ADDR_SPACE_0                (16 * SZ_1K)
+
+#define to_keystone_pcie(x)             dev_get_drvdata((x)->dev)
+
+struct ks_pcie_of_data {
+        enum dw_pcie_device_mode mode;
+        const struct dw_pcie_host_ops *host_ops;
+        const struct dw_pcie_ep_ops *ep_ops;
+        unsigned int version;
+};
+
+struct keystone_pcie {
+        struct dw_pcie          *pci;
+        /* PCI Device ID */
+        u32                     device_id;
+        int                     legacy_host_irqs[PCI_NUM_INTX];
+        struct device_node      *legacy_intc_np;
+
+        int                     msi_host_irq;
+        int                     num_lanes;
+        struct phy              **phy;
+        struct device_link      **link;
+        struct device_node      *msi_intc_np;
+        struct irq_domain       *legacy_irq_domain;
+        struct device_node      *np;
+
+        /* Application register space */
+        void __iomem            *va_app_base;   /* DT 1st resource */
+        struct resource         app;
+        bool                    is_am6;
+};
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+        return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+                               u32 val)
+{
+        writel(val, ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
+{
+        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+        struct keystone_pcie *ks_pcie;
+        u32 irq = data->hwirq;
+        struct dw_pcie *pci;
+        u32 reg_offset;
+        u32 bit_pos;
+
+        pci = to_dw_pcie_from_pp(pp);
+        ks_pcie = to_keystone_pcie(pci);
+
+        reg_offset = irq % 8;
+        bit_pos = irq >> 3;
+
+        ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+                           BIT(bit_pos));
+        ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+        struct keystone_pcie *ks_pcie;
+        struct dw_pcie *pci;
+        u64 msi_target;
+
+        pci = to_dw_pcie_from_pp(pp);
+        ks_pcie = to_keystone_pcie(pci);
+
+        msi_target = ks_pcie->app.start + MSI_IRQ;
+        msg->address_lo = lower_32_bits(msi_target);
+        msg->address_hi = upper_32_bits(msi_target);
+        msg->data = data->hwirq;
+
+        dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+                (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+                                    const struct cpumask *mask, bool force)
+{
+        return -EINVAL;
+}
+
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+        struct keystone_pcie *ks_pcie;
+        u32 irq = data->hwirq;
+        struct dw_pcie *pci;
+        unsigned long flags;
+        u32 reg_offset;
+        u32 bit_pos;
+
+        raw_spin_lock_irqsave(&pp->lock, flags);
+
+        pci = to_dw_pcie_from_pp(pp);
+        ks_pcie = to_keystone_pcie(pci);
+
+        reg_offset = irq % 8;
+        bit_pos = irq >> 3;
+
+        ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
+                           BIT(bit_pos));
+
+        raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void ks_pcie_msi_unmask(struct irq_data *data)
+{
+        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+        struct keystone_pcie *ks_pcie;
+        u32 irq = data->hwirq;
+        struct dw_pcie *pci;
+        unsigned long flags;
+        u32 reg_offset;
+        u32 bit_pos;
+
+        raw_spin_lock_irqsave(&pp->lock, flags);
+
+        pci = to_dw_pcie_from_pp(pp);
+        ks_pcie = to_keystone_pcie(pci);
+
+        reg_offset = irq % 8;
+        bit_pos = irq >> 3;
+
+        ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
+                           BIT(bit_pos));
+
+        raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip ks_pcie_msi_irq_chip = {
+        .name = "KEYSTONE-PCI-MSI",
+        .irq_ack = ks_pcie_msi_irq_ack,
+        .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+        .irq_set_affinity = ks_pcie_msi_set_affinity,
+        .irq_mask = ks_pcie_msi_mask,
+        .irq_unmask = ks_pcie_msi_unmask,
+};
+
+static int ks_pcie_msi_host_init(struct pcie_port *pp)
+{
+        pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+        return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+                                      int offset)
+{
+        struct dw_pcie *pci = ks_pcie->pci;
+        struct device *dev = pci->dev;
+        u32 pending;
+        int virq;
+
+        pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
+
+        if (BIT(0) & pending) {
+                virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+                dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
+                generic_handle_irq(virq);
+        }
+
+        /* EOI the INTx interrupt */
+        ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+/*
+ * Dummy function so that DW core doesn't configure MSI
+ */
+static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
+{
+        return 0;
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+        ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+        u32 reg;
+        struct device *dev = ks_pcie->pci->dev;
+
+        reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+        if (!reg)
+                return IRQ_NONE;
+
+        if (reg & ERR_SYS)
+                dev_err(dev, "System Error\n");
+
+        if (reg & ERR_FATAL)
+                dev_err(dev, "Fatal Error\n");
+
+        if (reg & ERR_NONFATAL)
+                dev_dbg(dev, "Non Fatal Error\n");
+
+        if (reg & ERR_CORR)
+                dev_dbg(dev, "Correctable Error\n");
+
+        if (!ks_pcie->is_am6 && (reg & ERR_AXI))
+                dev_err(dev, "AXI tag lookup fatal Error\n");
+
+        if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
+                dev_err(dev, "ECRC Error\n");
+
+        ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+        return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+        .name = "Keystone-PCI-Legacy-IRQ",
+        .irq_ack = ks_pcie_ack_legacy_irq,
+        .irq_mask = ks_pcie_mask_legacy_irq,
+        .irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+                                       unsigned int irq,
+                                       irq_hw_number_t hw_irq)
+{
+        irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+                                 handle_level_irq);
+        irq_set_chip_data(irq, d->host_data);
+
+        return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+        .map = ks_pcie_init_legacy_irq_map,
+        .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+        u32 val;
+
+        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+        val |= DBI_CS2;
+        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+        do {
+                val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+        } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+        u32 val;
+
+        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+        val &= ~DBI_CS2;
+        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+        do {
+                val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+        } while (val & DBI_CS2);
+}
+
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+        u32 val;
+        struct dw_pcie *pci = ks_pcie->pci;
+        struct pcie_port *pp = &pci->pp;
+        u32 num_viewport = pci->num_viewport;
+        u64 start, end;
+        struct resource *mem;
+        int i;
+
+        mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+        start = mem->start;
+        end = mem->end;
+
+        /* Disable BARs for inbound access */
+        ks_pcie_set_dbi_mode(ks_pcie);
+        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+        ks_pcie_clear_dbi_mode(ks_pcie);
+
+        if (ks_pcie->is_am6)
+                return;
+
+        val = ilog2(OB_WIN_SIZE);
+        ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+        /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+        for (i = 0; i < num_viewport && (start < end); i++) {
+                ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+                                   lower_32_bits(start) | OB_ENABLEN);
+                ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+                                   upper_32_bits(start));
+                start += OB_WIN_SIZE * SZ_1M;
+        }
+
+        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+        val |= OB_XLAT_EN_VAL;
+        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+                                           unsigned int devfn, int where)
+{
+        struct pcie_port *pp = bus->sysdata;
+        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+        u32 reg;
+
+        reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+                CFG_FUNC(PCI_FUNC(devfn));
+        if (!pci_is_root_bus(bus->parent))
+                reg |= CFG_TYPE1;
+        ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+        return pp->va_cfg0_base + where;
+}
+
+static struct pci_ops ks_child_pcie_ops = {
+        .map_bus = ks_pcie_other_map_bus,
+        .read = pci_generic_config_read,
+        .write = pci_generic_config_write,
+};
+
+/**
+ * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
+{
+        struct pcie_port *pp = bus->sysdata;
+        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+        if (!pci_is_root_bus(bus))
+                return 0;
+
+        /* Configure and set up BAR0 */
+        ks_pcie_set_dbi_mode(ks_pcie);
+
+        /* Enable BAR0 */
+        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+        ks_pcie_clear_dbi_mode(ks_pcie);
+
+        /*
+         * For BAR0, just setting bus address for inbound writes (MSI) should
+         * be sufficient. Use physical address to avoid any conflicts.
+         */
+        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
+        return 0;
+}
+
+static struct pci_ops ks_pcie_ops = {
+        .map_bus = dw_pcie_own_conf_map_bus,
+        .read = pci_generic_config_read,
+        .write = pci_generic_config_write,
+        .add_bus = ks_pcie_v3_65_add_bus,
+};
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+        u32 val;
+
+        val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+        val &= PORT_LOGIC_LTSSM_STATE_MASK;
+        return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_stop_link(struct dw_pcie *pci)
+{
+        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+        u32 val;
+
+        /* Disable Link training */
+        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+        val &= ~LTSSM_EN_VAL;
+        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_start_link(struct dw_pcie *pci)
+{
+        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+        struct device *dev = pci->dev;
+        u32 val;
+
+        if (dw_pcie_link_up(pci)) {
+                dev_dbg(dev, "link is already up\n");
+                return 0;
+        }
+
+        /* Initiate Link Training */
+        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+        ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+        return 0;
+}
+
+static void ks_pcie_quirk(struct pci_dev *dev)
 {
         struct pci_bus *bus = dev->bus;
-        struct pci_dev *bridge = bus->self;
+        struct pci_dev *bridge;
         static const struct pci_device_id rc_pci_devids[] = {
                 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
                 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
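
A note on the MSI vector layout used by the new code in the hunk above: the application register block exposes eight MSI_IRQ_STATUS(n) registers of four bits each, and each register carries every eighth vector. Vector v therefore lands in status register v % 8 at bit v >> 3 (the math in ks_pcie_msi_irq_ack(), ks_pcie_msi_mask() and ks_pcie_msi_unmask()), and the chained handler inverts it as vector = offset + (pos << 3). A minimal stand-alone C sketch of that mapping follows; the helper names are illustrative and not part of the driver:

/* Stand-alone sketch of the Keystone MSI vector <-> (register, bit) mapping. */
#include <assert.h>
#include <stdio.h>

/* Which MSI_IRQ_STATUS(n) register and bit a vector lands in (hypothetical helpers). */
static void vector_to_reg_bit(unsigned int vector, unsigned int *reg,
                              unsigned int *bit)
{
        *reg = vector % 8;      /* register index n, 0..7 */
        *bit = vector >> 3;     /* bit position, 0..3     */
}

/* Inverse mapping, as computed in ks_pcie_msi_irq_handler(). */
static unsigned int reg_bit_to_vector(unsigned int reg, unsigned int bit)
{
        return reg + (bit << 3);
}

int main(void)
{
        unsigned int v, reg, bit;

        /* 8 registers x 4 bits each cover all 32 MSI vectors. */
        for (v = 0; v < 32; v++) {
                vector_to_reg_bit(v, &reg, &bit);
                assert(reg_bit_to_vector(reg, bit) == v);
                printf("vector %2u -> MSI_IRQ_STATUS(%u) bit %u\n", v, reg, bit);
        }
        return 0;
}

Because the register index doubles as the chained host interrupt offset, this interleaving spreads consecutive vectors across the eight host IRQ lines.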
@@ -57,7 +551,7 @@
 };

         if (pci_is_root_bus(bus))
-                return;
+                bridge = dev;

         /* look for the host bridge */
         while (!pci_is_root_bus(bus)) {
@@ -65,56 +559,34 @@
                 bus = bus->parent;
         }

-        if (bridge) {
-                /*
-                 * Keystone PCI controller has a h/w limitation of
-                 * 256 bytes maximum read request size. It can't handle
-                 * anything higher than this. So force this limit on
-                 * all downstream devices.
-                 */
-                if (pci_match_id(rc_pci_devids, bridge)) {
-                        if (pcie_get_readrq(dev) > 256) {
-                                dev_info(&dev->dev, "limiting MRRS to 256\n");
-                                pcie_set_readrq(dev, 256);
-                        }
+        if (!bridge)
+                return;
+
+        /*
+         * Keystone PCI controller has a h/w limitation of
+         * 256 bytes maximum read request size. It can't handle
+         * anything higher than this. So force this limit on
+         * all downstream devices.
+         */
+        if (pci_match_id(rc_pci_devids, bridge)) {
+                if (pcie_get_readrq(dev) > 256) {
+                        dev_info(&dev->dev, "limiting MRRS to 256\n");
+                        pcie_set_readrq(dev, 256);
                 }
         }
 }
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
-
-static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
-{
-        struct dw_pcie *pci = ks_pcie->pci;
-        struct pcie_port *pp = &pci->pp;
-        struct device *dev = pci->dev;
-        unsigned int retries;
-
-        dw_pcie_setup_rc(pp);
-
-        if (dw_pcie_link_up(pci)) {
-                dev_info(dev, "Link already up\n");
-                return 0;
-        }
-
-        /* check if the link is up or not */
-        for (retries = 0; retries < 5; retries++) {
-                ks_dw_pcie_initiate_link_train(ks_pcie);
-                if (!dw_pcie_wait_for_link(pci))
-                        return 0;
-        }
-
-        dev_err(dev, "phy link never came up\n");
-        return -ETIMEDOUT;
-}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);

 static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 {
-        unsigned int irq = irq_desc_get_irq(desc);
+        unsigned int irq = desc->irq_data.hwirq;
         struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
-        u32 offset = irq - ks_pcie->msi_host_irqs[0];
+        u32 offset = irq - ks_pcie->msi_host_irq;
         struct dw_pcie *pci = ks_pcie->pci;
+        struct pcie_port *pp = &pci->pp;
         struct device *dev = pci->dev;
         struct irq_chip *chip = irq_desc_get_chip(desc);
+        u32 vector, virq, reg, pos;

         dev_dbg(dev, "%s, irq %d\n", __func__, irq);

@@ -124,7 +596,23 @@
          * ack operation.
          */
         chained_irq_enter(chip, desc);
-        ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+
+        reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+        /*
+         * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+         * shows 1, 9, 17, 25 and so forth
+         */
+        for (pos = 0; pos < 4; pos++) {
+                if (!(reg & BIT(pos)))
+                        continue;
+
+                vector = offset + (pos << 3);
+                virq = irq_linear_revmap(pp->irq_domain, vector);
+                dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
+                        virq);
+                generic_handle_irq(virq);
+        }
+
         chained_irq_exit(chip, desc);
 }

@@ -153,91 +641,120 @@
          * ack operation.
          */
         chained_irq_enter(chip, desc);
-        ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+        ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
         chained_irq_exit(chip, desc);
 }

-static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
-                                           char *controller, int *num_irqs)
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
 {
-        int temp, max_host_irqs, legacy = 1, *host_irqs;
         struct device *dev = ks_pcie->pci->dev;
-        struct device_node *np_pcie = dev->of_node, **np_temp;
+        struct device_node *np = ks_pcie->np;
+        struct device_node *intc_np;
+        struct irq_data *irq_data;
+        int irq_count, irq, ret, i;

-        if (!strcmp(controller, "msi-interrupt-controller"))
-                legacy = 0;
-
-        if (legacy) {
-                np_temp = &ks_pcie->legacy_intc_np;
-                max_host_irqs = PCI_NUM_INTX;
-                host_irqs = &ks_pcie->legacy_host_irqs[0];
-        } else {
-                np_temp = &ks_pcie->msi_intc_np;
-                max_host_irqs = MAX_MSI_HOST_IRQS;
-                host_irqs = &ks_pcie->msi_host_irqs[0];
-        }
-
-        /* interrupt controller is in a child node */
-        *np_temp = of_get_child_by_name(np_pcie, controller);
-        if (!(*np_temp)) {
-                dev_err(dev, "Node for %s is absent\n", controller);
-                return -EINVAL;
-        }
-
-        temp = of_irq_count(*np_temp);
-        if (!temp) {
-                dev_err(dev, "No IRQ entries in %s\n", controller);
-                of_node_put(*np_temp);
-                return -EINVAL;
-        }
-
-        if (temp > max_host_irqs)
-                dev_warn(dev, "Too many %s interrupts defined %u\n",
-                        (legacy ? "legacy" : "MSI"), temp);
-
-        /*
-         * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
-         * 7 (MSI)
-         */
-        for (temp = 0; temp < max_host_irqs; temp++) {
-                host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
-                if (!host_irqs[temp])
-                        break;
-        }
-
-        of_node_put(*np_temp);
-
-        if (temp) {
-                *num_irqs = temp;
+        if (!IS_ENABLED(CONFIG_PCI_MSI))
                 return 0;
+
+        intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+        if (!intc_np) {
+                if (ks_pcie->is_am6)
+                        return 0;
+                dev_warn(dev, "msi-interrupt-controller node is absent\n");
+                return -EINVAL;
         }

-        return -EINVAL;
+        irq_count = of_irq_count(intc_np);
+        if (!irq_count) {
+                dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+                ret = -EINVAL;
+                goto err;
+        }
+
+        for (i = 0; i < irq_count; i++) {
+                irq = irq_of_parse_and_map(intc_np, i);
+                if (!irq) {
+                        ret = -EINVAL;
+                        goto err;
+                }
+
+                if (!ks_pcie->msi_host_irq) {
+                        irq_data = irq_get_irq_data(irq);
+                        if (!irq_data) {
+                                ret = -EINVAL;
+                                goto err;
+                        }
+                        ks_pcie->msi_host_irq = irq_data->hwirq;
+                }
+
+                irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+                                                 ks_pcie);
+        }
+
+        of_node_put(intc_np);
+        return 0;
+
+err:
+        of_node_put(intc_np);
+        return ret;
 }

-static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
 {
-        int i;
+        struct device *dev = ks_pcie->pci->dev;
+        struct irq_domain *legacy_irq_domain;
+        struct device_node *np = ks_pcie->np;
+        struct device_node *intc_np;
+        int irq_count, irq, ret = 0, i;

-        /* Legacy IRQ */
-        for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
-                irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+        intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+        if (!intc_np) {
+                /*
+                 * Since legacy interrupts are modeled as edge-interrupts in
+                 * AM6, keep it disabled for now.
+                 */
+                if (ks_pcie->is_am6)
+                        return 0;
+                dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+                return -EINVAL;
+        }
+
+        irq_count = of_irq_count(intc_np);
+        if (!irq_count) {
+                dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+                ret = -EINVAL;
+                goto err;
+        }
+
+        for (i = 0; i < irq_count; i++) {
+                irq = irq_of_parse_and_map(intc_np, i);
+                if (!irq) {
+                        ret = -EINVAL;
+                        goto err;
+                }
+                ks_pcie->legacy_host_irqs[i] = irq;
+
+                irq_set_chained_handler_and_data(irq,
                                                  ks_pcie_legacy_irq_handler,
                                                  ks_pcie);
         }
-        ks_dw_pcie_enable_legacy_irqs(ks_pcie);

-        /* MSI IRQ */
-        if (IS_ENABLED(CONFIG_PCI_MSI)) {
-                for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
-                        irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
-                                                         ks_pcie_msi_irq_handler,
-                                                         ks_pcie);
-                }
+        legacy_irq_domain =
+                irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+                                      &ks_pcie_legacy_irq_domain_ops, NULL);
+        if (!legacy_irq_domain) {
+                dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+                ret = -EINVAL;
+                goto err;
         }
+        ks_pcie->legacy_irq_domain = legacy_irq_domain;

-        if (ks_pcie->error_irq > 0)
-                ks_dw_pcie_enable_error_irq(ks_pcie);
+        for (i = 0; i < PCI_NUM_INTX; i++)
+                ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
+
+err:
+        of_node_put(intc_np);
+        return ret;
 }

 #ifdef CONFIG_ARM
@@ -246,8 +763,8 @@
  * bus error instead of returning 0xffffffff. This handler always returns 0
  * for this kind of faults.
  */
-static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
-                               struct pt_regs *regs)
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+                         struct pt_regs *regs)
 {
         unsigned long instr = *(unsigned long *) instruction_pointer(regs);

@@ -262,100 +779,101 @@
 }
 #endif

+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+        int ret;
+        unsigned int id;
+        struct regmap *devctrl_regs;
+        struct dw_pcie *pci = ks_pcie->pci;
+        struct device *dev = pci->dev;
+        struct device_node *np = dev->of_node;
+
+        devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+        if (IS_ERR(devctrl_regs))
+                return PTR_ERR(devctrl_regs);
+
+        ret = regmap_read(devctrl_regs, 0, &id);
+        if (ret)
+                return ret;
+
+        dw_pcie_dbi_ro_wr_en(pci);
+        dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+        dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+        dw_pcie_dbi_ro_wr_dis(pci);
+
+        return 0;
+}
+
 static int __init ks_pcie_host_init(struct pcie_port *pp)
 {
         struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
         struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-        u32 val;
+        int ret;

-        ks_pcie_establish_link(ks_pcie);
-        ks_dw_pcie_setup_rc_app_regs(ks_pcie);
-        ks_pcie_setup_interrupts(ks_pcie);
+        pp->bridge->ops = &ks_pcie_ops;
+        if (!ks_pcie->is_am6)
+                pp->bridge->child_ops = &ks_child_pcie_ops;
+
+        ret = ks_pcie_config_legacy_irq(ks_pcie);
+        if (ret)
+                return ret;
+
+        ret = ks_pcie_config_msi_irq(ks_pcie);
+        if (ret)
+                return ret;
+
+        dw_pcie_setup_rc(pp);
+
+        ks_pcie_stop_link(pci);
+        ks_pcie_setup_rc_app_regs(ks_pcie);
         writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
                pci->dbi_base + PCI_IO_BASE);

-        /* update the Vendor ID */
-        writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);
-
-        /* update the DEV_STAT_CTRL to publish right mrrs */
-        val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
-        val &= ~PCI_EXP_DEVCTL_READRQ;
-        /* set the mrrs to 256 bytes */
-        val |= BIT(12);
-        writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+        ret = ks_pcie_init_id(ks_pcie);
+        if (ret < 0)
+                return ret;

 #ifdef CONFIG_ARM
         /*
          * PCIe access errors that result into OCP errors are caught by ARM as
          * "External aborts"
          */
-        hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+        hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
                         "Asynchronous external abort");
 #endif
+
+        ks_pcie_start_link(pci);
+        dw_pcie_wait_for_link(pci);

         return 0;
 }

-static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
-        .rd_other_conf = ks_dw_pcie_rd_other_conf,
-        .wr_other_conf = ks_dw_pcie_wr_other_conf,
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
         .host_init = ks_pcie_host_init,
-        .msi_set_irq = ks_dw_pcie_msi_set_irq,
-        .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
-        .get_msi_addr = ks_dw_pcie_get_msi_addr,
-        .msi_host_init = ks_dw_pcie_msi_host_init,
-        .msi_irq_ack = ks_dw_pcie_msi_irq_ack,
-        .scan_bus = ks_dw_pcie_v3_65_scan_bus,
+        .msi_host_init = ks_pcie_msi_host_init,
 };

-static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+        .host_init = ks_pcie_host_init,
+        .msi_host_init = ks_pcie_am654_msi_host_init,
+};
+
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
 {
         struct keystone_pcie *ks_pcie = priv;

-        return ks_dw_pcie_handle_error_irq(ks_pcie);
+        return ks_pcie_handle_error_irq(ks_pcie);
 }

-static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
-                         struct platform_device *pdev)
+static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+                                        struct platform_device *pdev)
 {
         struct dw_pcie *pci = ks_pcie->pci;
         struct pcie_port *pp = &pci->pp;
         struct device *dev = &pdev->dev;
         int ret;

-        ret = ks_pcie_get_irq_controller_info(ks_pcie,
-                                        "legacy-interrupt-controller",
-                                        &ks_pcie->num_legacy_host_irqs);
-        if (ret)
-                return ret;
-
-        if (IS_ENABLED(CONFIG_PCI_MSI)) {
-                ret = ks_pcie_get_irq_controller_info(ks_pcie,
-                                                "msi-interrupt-controller",
-                                                &ks_pcie->num_msi_host_irqs);
-                if (ret)
-                        return ret;
-        }
-
-        /*
-         * Index 0 is the platform interrupt for error interrupt
-         * from RC. This is optional.
-         */
-        ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
-        if (ks_pcie->error_irq <= 0)
-                dev_info(dev, "no error IRQ defined\n");
-        else {
-                ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
-                                  IRQF_SHARED, "pcie-error-irq", ks_pcie);
-                if (ret < 0) {
-                        dev_err(dev, "failed to request error IRQ %d\n",
-                                ks_pcie->error_irq);
-                        return ret;
-                }
-        }
-
-        pp->ops = &keystone_pcie_host_ops;
-        ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+        ret = dw_pcie_host_init(pp);
         if (ret) {
                 dev_err(dev, "failed to initialize host\n");
                 return ret;
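
ks_pcie_init_id() in the hunk above reads a single 32-bit word from the ti,syscon-pcie-id syscon and splits it into the two 16-bit configuration-space IDs via PCIE_VENDORID_MASK and PCIE_DEVICEID_SHIFT. A stand-alone sketch of that split; the sample register value below is invented for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PCIE_VENDORID_MASK      0xffff
#define PCIE_DEVICEID_SHIFT     16

int main(void)
{
        /* Invented example: device ID 0xb00a (K2L) in the high half,
         * TI's PCI vendor ID 0x104c in the low half. */
        uint32_t id = (0xb00aU << PCIE_DEVICEID_SHIFT) | 0x104cU;
        uint16_t vendor = id & PCIE_VENDORID_MASK;
        uint16_t device = id >> PCIE_DEVICEID_SHIFT;

        assert(vendor == 0x104c && device == 0xb00a);
        printf("vendor %#06x device %#06x\n", vendor, device);
        return 0;
}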
@@ -364,36 +882,298 @@
         return 0;
 }

-static const struct of_device_id ks_pcie_of_match[] = {
-        {
-                .type = "pci",
-                .compatible = "ti,keystone-pcie",
-        },
-        { },
-};
-
-static const struct dw_pcie_ops dw_pcie_ops = {
-        .link_up = ks_dw_pcie_link_up,
-};
-
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+                                     u32 reg, size_t size, u32 val)
 {
-        struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

-        clk_disable_unprepare(ks_pcie->clk);
+        ks_pcie_set_dbi_mode(ks_pcie);
+        dw_pcie_write(base + reg, size, val);
+        ks_pcie_clear_dbi_mode(ks_pcie);
+}
+
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+        .start_link = ks_pcie_start_link,
+        .stop_link = ks_pcie_stop_link,
+        .link_up = ks_pcie_link_up,
+        .write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+        int flags;
+
+        ep->page_size = AM654_WIN_SIZE;
+        flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+        dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+        struct dw_pcie *pci = ks_pcie->pci;
+        u8 int_pin;
+
+        int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+        if (int_pin == 0 || int_pin > 4)
+                return;
+
+        ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+                           INT_ENABLE);
+        ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+        mdelay(1);
+        ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+        ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+                           INT_ENABLE);
+}
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+                                   enum pci_epc_irq_type type,
+                                   u16 interrupt_num)
+{
+        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+        switch (type) {
+        case PCI_EPC_IRQ_LEGACY:
+                ks_pcie_am654_raise_legacy_irq(ks_pcie);
+                break;
+        case PCI_EPC_IRQ_MSI:
+                dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+                break;
+        case PCI_EPC_IRQ_MSIX:
+                dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+                break;
+        default:
+                dev_err(pci->dev, "UNKNOWN IRQ type\n");
+                return -EINVAL;
+        }

         return 0;
 }

+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+        .linkup_notifier = false,
+        .msi_capable = true,
+        .msix_capable = true,
+        .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+        .bar_fixed_64bit = 1 << BAR_0,
+        .bar_fixed_size[2] = SZ_1M,
+        .bar_fixed_size[3] = SZ_64K,
+        .bar_fixed_size[4] = 256,
+        .bar_fixed_size[5] = SZ_1M,
+        .align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+        return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+        .ep_init = ks_pcie_am654_ep_init,
+        .raise_irq = ks_pcie_am654_raise_irq,
+        .get_features = &ks_pcie_am654_get_features,
+};
+
+static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+                                      struct platform_device *pdev)
+{
+        int ret;
+        struct dw_pcie_ep *ep;
+        struct resource *res;
+        struct device *dev = &pdev->dev;
+        struct dw_pcie *pci = ks_pcie->pci;
+
+        ep = &pci->ep;
+
+        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+        if (!res)
+                return -EINVAL;
+
+        ep->phys_base = res->start;
+        ep->addr_size = resource_size(res);
+
+        ret = dw_pcie_ep_init(ep);
+        if (ret) {
+                dev_err(dev, "failed to initialize endpoint\n");
+                return ret;
+        }
+
+        return 0;
+}
+
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
+{
+        int num_lanes = ks_pcie->num_lanes;
+
+        while (num_lanes--) {
+                phy_power_off(ks_pcie->phy[num_lanes]);
+                phy_exit(ks_pcie->phy[num_lanes]);
+        }
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+        int i;
+        int ret;
+        int num_lanes = ks_pcie->num_lanes;
+
+        for (i = 0; i < num_lanes; i++) {
+                ret = phy_reset(ks_pcie->phy[i]);
+                if (ret < 0)
+                        goto err_phy;
+
+                ret = phy_init(ks_pcie->phy[i]);
+                if (ret < 0)
+                        goto err_phy;
+
+                ret = phy_power_on(ks_pcie->phy[i]);
+                if (ret < 0) {
+                        phy_exit(ks_pcie->phy[i]);
+                        goto err_phy;
+                }
+        }
+
+        return 0;
+
+err_phy:
+        while (--i >= 0) {
+                phy_power_off(ks_pcie->phy[i]);
+                phy_exit(ks_pcie->phy[i]);
+        }
+
+        return ret;
+}
+
+static int ks_pcie_set_mode(struct device *dev)
+{
+        struct device_node *np = dev->of_node;
+        struct regmap *syscon;
+        u32 val;
+        u32 mask;
+        int ret = 0;
+
+        syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+        if (IS_ERR(syscon))
+                return 0;
+
+        mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+        val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+        ret = regmap_update_bits(syscon, 0, mask, val);
+        if (ret) {
+                dev_err(dev, "failed to set pcie mode\n");
+                return ret;
+        }
+
+        return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+                                  enum dw_pcie_device_mode mode)
+{
+        struct device_node *np = dev->of_node;
+        struct regmap *syscon;
+        u32 val;
+        u32 mask;
+        int ret = 0;
+
+        syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+        if (IS_ERR(syscon))
+                return 0;
+
+        mask = AM654_PCIE_DEV_TYPE_MASK;
+
+        switch (mode) {
+        case DW_PCIE_RC_TYPE:
+                val = RC;
+                break;
+        case DW_PCIE_EP_TYPE:
+                val = EP;
+                break;
+        default:
+                dev_err(dev, "INVALID device type %d\n", mode);
+                return -EINVAL;
+        }
+
+        ret = regmap_update_bits(syscon, 0, mask, val);
+        if (ret) {
+                dev_err(dev, "failed to set pcie mode\n");
+                return ret;
+        }
+
+        return 0;
+}
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+        .host_ops = &ks_pcie_host_ops,
+        .version = 0x365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+        .host_ops = &ks_pcie_am654_host_ops,
+        .mode = DW_PCIE_RC_TYPE,
+        .version = 0x490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+        .ep_ops = &ks_pcie_am654_ep_ops,
+        .mode = DW_PCIE_EP_TYPE,
+        .version = 0x490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+        {
+                .type = "pci",
+                .data = &ks_pcie_rc_of_data,
+                .compatible = "ti,keystone-pcie",
+        },
+        {
+                .data = &ks_pcie_am654_rc_of_data,
+                .compatible = "ti,am654-pcie-rc",
+        },
+        {
+                .data = &ks_pcie_am654_ep_of_data,
+                .compatible = "ti,am654-pcie-ep",
+        },
+        { },
+};
+
 static int __init ks_pcie_probe(struct platform_device *pdev)
 {
+        const struct dw_pcie_host_ops *host_ops;
+        const struct dw_pcie_ep_ops *ep_ops;
         struct device *dev = &pdev->dev;
+        struct device_node *np = dev->of_node;
+        const struct ks_pcie_of_data *data;
+        const struct of_device_id *match;
+        enum dw_pcie_device_mode mode;
         struct dw_pcie *pci;
         struct keystone_pcie *ks_pcie;
+        struct device_link **link;
+        struct gpio_desc *gpiod;
         struct resource *res;
-        void __iomem *reg_p;
-        struct phy *phy;
+        unsigned int version;
+        void __iomem *base;
+        struct phy **phy;
+        u32 num_lanes;
+        char name[10];
         int ret;
+        int irq;
+        int i;
+
+        match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
+        data = (struct ks_pcie_of_data *)match->data;
+        if (!data)
+                return -EINVAL;
+
+        version = data->version;
+        host_ops = data->host_ops;
+        ep_ops = data->ep_ops;
+        mode = data->mode;

         ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
         if (!ks_pcie)
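
ks_pcie_set_mode() in the hunk above never writes the whole mode syscon register; it hands regmap_update_bits() a mask/value pair. With the device-type field at bits [2:1] (KS_PCIE_DEV_TYPE_MASK) and SYSCLOCKOUTEN at bit 0, RC mode works out to mask 0x7, value 0x5. A stand-alone sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                  (1U << (n))
#define KS_PCIE_DEV_TYPE_MASK   (0x3 << 1)
#define KS_PCIE_DEV_TYPE(mode)  ((mode) << 1)
#define KS_PCIE_SYSCLOCKOUTEN   BIT(0)
#define RC                      0x2

int main(void)
{
        /* The pair ks_pcie_set_mode() passes to regmap_update_bits(). */
        uint32_t mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
        uint32_t val  = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;

        printf("mask %#x val %#x\n", mask, val);   /* prints mask 0x7 val 0x5 */
        return 0;
}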
@@ -403,55 +1183,177 @@
         if (!pci)
                 return -ENOMEM;

+        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+        ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+        if (IS_ERR(ks_pcie->va_app_base))
+                return PTR_ERR(ks_pcie->va_app_base);
+
+        ks_pcie->app = *res;
+
+        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+        base = devm_pci_remap_cfg_resource(dev, res);
+        if (IS_ERR(base))
+                return PTR_ERR(base);
+
+        if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+                ks_pcie->is_am6 = true;
+
+        pci->dbi_base = base;
+        pci->dbi_base2 = base;
         pci->dev = dev;
-        pci->ops = &dw_pcie_ops;
+        pci->ops = &ks_pcie_dw_pcie_ops;
+        pci->version = version;

-        ks_pcie->pci = pci;
+        irq = platform_get_irq(pdev, 0);
+        if (irq < 0)
+                return irq;

-        /* initialize SerDes Phy if present */
-        phy = devm_phy_get(dev, "pcie-phy");
-        if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER)
-                return PTR_ERR(phy);
-
-        if (!IS_ERR_OR_NULL(phy)) {
-                ret = phy_init(phy);
-                if (ret < 0)
-                        return ret;
-        }
-
-        /* index 2 is to read PCI DEVICE_ID */
-        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-        reg_p = devm_ioremap_resource(dev, res);
-        if (IS_ERR(reg_p))
-                return PTR_ERR(reg_p);
-        ks_pcie->device_id = readl(reg_p) >> 16;
-        devm_iounmap(dev, reg_p);
-        devm_release_mem_region(dev, res->start, resource_size(res));
-
-        ks_pcie->np = dev->of_node;
-        platform_set_drvdata(pdev, ks_pcie);
-        ks_pcie->clk = devm_clk_get(dev, "pcie");
-        if (IS_ERR(ks_pcie->clk)) {
-                dev_err(dev, "Failed to get pcie rc clock\n");
-                return PTR_ERR(ks_pcie->clk);
-        }
-        ret = clk_prepare_enable(ks_pcie->clk);
-        if (ret)
+        ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+                          "ks-pcie-error-irq", ks_pcie);
+        if (ret < 0) {
+                dev_err(dev, "failed to request error IRQ %d\n",
+                        irq);
                 return ret;
+        }
+
+        ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+        if (ret)
+                num_lanes = 1;
+
+        phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+        if (!phy)
+                return -ENOMEM;
+
+        link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+        if (!link)
+                return -ENOMEM;
+
+        for (i = 0; i < num_lanes; i++) {
+                snprintf(name, sizeof(name), "pcie-phy%d", i);
+                phy[i] = devm_phy_optional_get(dev, name);
+                if (IS_ERR(phy[i])) {
+                        ret = PTR_ERR(phy[i]);
+                        goto err_link;
+                }
+
+                if (!phy[i])
+                        continue;
+
+                link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+                if (!link[i]) {
+                        ret = -EINVAL;
+                        goto err_link;
+                }
+        }
+
+        ks_pcie->np = np;
+        ks_pcie->pci = pci;
+        ks_pcie->link = link;
+        ks_pcie->num_lanes = num_lanes;
+        ks_pcie->phy = phy;
+
+        gpiod = devm_gpiod_get_optional(dev, "reset",
+                                        GPIOD_OUT_LOW);
+        if (IS_ERR(gpiod)) {
+                ret = PTR_ERR(gpiod);
+                if (ret != -EPROBE_DEFER)
+                        dev_err(dev, "Failed to get reset GPIO\n");
+                goto err_link;
+        }
+
+        ret = ks_pcie_enable_phy(ks_pcie);
+        if (ret) {
+                dev_err(dev, "failed to enable phy\n");
+                goto err_link;
+        }

         platform_set_drvdata(pdev, ks_pcie);
+        pm_runtime_enable(dev);
+        ret = pm_runtime_get_sync(dev);
+        if (ret < 0) {
+                dev_err(dev, "pm_runtime_get_sync failed\n");
+                goto err_get_sync;
+        }

-        ret = ks_add_pcie_port(ks_pcie, pdev);
+        if (pci->version >= 0x480A)
+                ret = ks_pcie_am654_set_mode(dev, mode);
+        else
+                ret = ks_pcie_set_mode(dev);
         if (ret < 0)
-                goto fail_clk;
+                goto err_get_sync;
+
+        switch (mode) {
+        case DW_PCIE_RC_TYPE:
+                if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+                        ret = -ENODEV;
+                        goto err_get_sync;
+                }
+
+                /*
+                 * "Power Sequencing and Reset Signal Timings" table in
+                 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+                 * indicates PERST# should be deasserted after minimum of 100us
+                 * once REFCLK is stable. The REFCLK to the connector in RC
+                 * mode is selected while enabling the PHY. So deassert PERST#
+                 * after 100 us.
+                 */
+                if (gpiod) {
+                        usleep_range(100, 200);
+                        gpiod_set_value_cansleep(gpiod, 1);
+                }
+
+                pci->pp.ops = host_ops;
+                ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+                if (ret < 0)
+                        goto err_get_sync;
+                break;
+        case DW_PCIE_EP_TYPE:
+                if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+                        ret = -ENODEV;
+                        goto err_get_sync;
+                }
+
+                pci->ep.ops = ep_ops;
+                ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+                if (ret < 0)
+                        goto err_get_sync;
+                break;
+        default:
+                dev_err(dev, "INVALID device type %d\n", mode);
+        }
+
+        ks_pcie_enable_error_irq(ks_pcie);

         return 0;
-fail_clk:
-        clk_disable_unprepare(ks_pcie->clk);
+
+err_get_sync:
+        pm_runtime_put(dev);
+        pm_runtime_disable(dev);
+        ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+        while (--i >= 0 && link[i])
+                device_link_del(link[i]);

         return ret;
 }

+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+        struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+        struct device_link **link = ks_pcie->link;
+        int num_lanes = ks_pcie->num_lanes;
+        struct device *dev = &pdev->dev;
+
+        pm_runtime_put(dev);
+        pm_runtime_disable(dev);
+        ks_pcie_disable_phy(ks_pcie);
+        while (num_lanes--)
+                device_link_del(link[num_lanes]);
+
+        return 0;
+}
+
 static struct platform_driver ks_pcie_driver __refdata = {
         .probe  = ks_pcie_probe,
         .remove = __exit_p(ks_pcie_remove),
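
For reference on the outbound translation programmed by ks_pcie_setup_rc_app_regs() earlier in this patch: the hardware offers fixed 8 MB outbound windows (OB_WIN_SIZE), and the driver walks the bridge MEM window writing one OB_OFFSET_INDEX/OB_OFFSET_HI pair per viewport, mapped 1:1. A stand-alone sketch of the register values such a walk produces; the base address and viewport count are invented examples:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define OB_WIN_SIZE     8U              /* window size in MB, as in the driver */
#define OB_ENABLEN      1U
#define SZ_1M           0x100000ULL

int main(void)
{
        /* Invented example: three viewports over a MEM window at 0x60000000. */
        uint64_t start = 0x60000000ULL, end = 0x617fffffULL;
        unsigned int num_viewport = 3, i;

        for (i = 0; i < num_viewport && start < end; i++) {
                uint32_t lo = (uint32_t)start | OB_ENABLEN;     /* lower_32_bits | enable */
                uint32_t hi = (uint32_t)(start >> 32);          /* upper_32_bits          */

                printf("OB_OFFSET_INDEX(%u) = %#010" PRIx32
                       "  OB_OFFSET_HI(%u) = %#" PRIx32 "\n", i, lo, i, hi);
                start += OB_WIN_SIZE * SZ_1M;
        }
        return 0;
}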