2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -3,78 +3,65 @@
  * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
  */
 
+#include <linux/mlx5/vport.h>
 #include "ib_rep.h"
-
-static const struct mlx5_ib_profile rep_profile = {
-	STAGE_CREATE(MLX5_IB_STAGE_INIT,
-		     mlx5_ib_stage_init_init,
-		     mlx5_ib_stage_init_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
-		     mlx5_ib_stage_rep_flow_db_init,
-		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
-		     mlx5_ib_stage_caps_init,
-		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
-		     mlx5_ib_stage_rep_non_default_cb,
-		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
-		     mlx5_ib_stage_rep_roce_init,
-		     mlx5_ib_stage_rep_roce_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
-		     mlx5_ib_stage_dev_res_init,
-		     mlx5_ib_stage_dev_res_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
-		     mlx5_ib_stage_counters_init,
-		     mlx5_ib_stage_counters_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
-		     mlx5_ib_stage_bfrag_init,
-		     mlx5_ib_stage_bfrag_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
-		     NULL,
-		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
-		     mlx5_ib_stage_ib_reg_init,
-		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
-		     mlx5_ib_stage_post_ib_reg_umr_init,
-		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
-		     mlx5_ib_stage_class_attr_init,
-		     NULL),
-};
+#include "srq.h"
 
 static int
-mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-	return 0;
-}
+	struct mlx5_ib_dev *ibdev;
+	int vport_index;
 
-static void
-mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
-{
-	rep->rep_if[REP_IB].priv = NULL;
+	ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
+	vport_index = rep->vport_index;
+
+	ibdev->port[vport_index].rep = rep;
+	rep->rep_data[REP_IB].priv = ibdev;
+	write_lock(&ibdev->port[vport_index].roce.netdev_lock);
+	ibdev->port[vport_index].roce.netdev =
+		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
+	write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
+
+	return 0;
 }
 
 static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	int num_ports = mlx5_eswitch_get_total_vports(dev);
+	const struct mlx5_ib_profile *profile;
 	struct mlx5_ib_dev *ibdev;
+	int vport_index;
 
-	ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
+	if (rep->vport == MLX5_VPORT_UPLINK)
+		profile = &raw_eth_profile;
+	else
+		return mlx5_ib_set_vport_rep(dev, rep);
+
+	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
 	if (!ibdev)
 		return -ENOMEM;
 
-	ibdev->rep = rep;
-	ibdev->mdev = dev;
-	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
-			       MLX5_CAP_GEN(dev, num_vhca_ports));
-	if (!__mlx5_ib_add(ibdev, &rep_profile)) {
+	ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
+			      GFP_KERNEL);
+	if (!ibdev->port) {
 		ib_dealloc_device(&ibdev->ib_dev);
-		return -EINVAL;
+		return -ENOMEM;
 	}
 
-	rep->rep_if[REP_IB].priv = ibdev;
+	ibdev->is_rep = true;
+	vport_index = rep->vport_index;
+	ibdev->port[vport_index].rep = rep;
+	ibdev->port[vport_index].roce.netdev =
+		mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
+	ibdev->mdev = dev;
+	ibdev->num_ports = num_ports;
+
+	if (!__mlx5_ib_add(ibdev, profile))
+		return -EINVAL;
+
+	rep->rep_data[REP_IB].priv = ibdev;
 
 	return 0;
 }
@@ -82,14 +69,18 @@
 static void
 mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5_ib_dev *dev;
+	struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+	struct mlx5_ib_port *port;
 
-	if (!rep->rep_if[REP_IB].priv)
-		return;
+	port = &dev->port[rep->vport_index];
+	write_lock(&port->roce.netdev_lock);
+	port->roce.netdev = NULL;
+	write_unlock(&port->roce.netdev_lock);
+	rep->rep_data[REP_IB].priv = NULL;
+	port->rep = NULL;
 
-	dev = mlx5_ib_rep_to_dev(rep);
-	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-	rep->rep_if[REP_IB].priv = NULL;
+	if (rep->vport == MLX5_VPORT_UPLINK)
+		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -97,53 +88,24 @@
 	return mlx5_ib_rep_to_dev(rep);
 }
 
-static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5_ib_vport_rep_load,
+	.unload = mlx5_ib_vport_rep_unload,
+	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
+};
+
+void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
-	int vport;
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
-	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep_if rep_if = {};
-
-		rep_if.load = mlx5_ib_vport_rep_load;
-		rep_if.unload = mlx5_ib_vport_rep_unload;
-		rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
-		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
-	}
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
 }
 
-static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
+void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
-	int vport;
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
-}
-
-void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
-{
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-
-	rep_if.load = mlx5_ib_nic_rep_load;
-	rep_if.unload = mlx5_ib_nic_rep_unload;
-	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
-	rep_if.priv = dev;
-
-	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
-
-	mlx5_ib_rep_register_vf_vports(dev);
-}
-
-void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
-{
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-
-	mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
-	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
+	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
 }
 
 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
@@ -152,15 +114,15 @@
 }
 
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
-					  int vport_index)
+					  u16 vport_num)
 {
-	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
+	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_IB);
 }
 
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
-					  int vport_index)
+					  u16 vport_num)
 {
-	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
+	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
 }
 
 struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
@@ -168,27 +130,27 @@
 	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
 }
 
-struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
+struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
+					   u16 vport_num)
 {
-	return mlx5_eswitch_vport_rep(esw, vport);
+	return mlx5_eswitch_vport_rep(esw, vport_num);
 }
 
-int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-			      struct mlx5_ib_sq *sq)
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+						   struct mlx5_ib_sq *sq,
+						   u16 port)
 {
-	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep;
 
-	if (!dev->rep)
-		return 0;
+	if (!dev->is_rep || !port)
+		return NULL;
 
-	flow_rule =
-		mlx5_eswitch_add_send_to_vport_rule(esw,
-						    dev->rep->vport,
-						    sq->base.mqp.qpn);
-	if (IS_ERR(flow_rule))
-		return PTR_ERR(flow_rule);
-	sq->flow_rule = flow_rule;
+	if (!dev->port[port - 1].rep)
+		return ERR_PTR(-EINVAL);
 
-	return 0;
+	rep = dev->port[port - 1].rep;
+
+	return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
+						   sq->base.mqp.qpn);
 }
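
A note on the locking introduced above: mlx5_ib_set_vport_rep() and mlx5_ib_vport_rep_unload() publish and clear port[vport_index].roce.netdev under roce.netdev_lock, taken with write_lock()/write_unlock(), i.e. as a rwlock. A reader would pair read_lock()/read_unlock() around the dereference and pin the netdev before dropping the lock. A minimal sketch under that assumption (the helper name is illustrative, not part of this patch):

	/* Hypothetical reader of the rep-managed netdev pointer; assumes
	 * roce.netdev_lock is the rwlock written by mlx5_ib_set_vport_rep()
	 * and mlx5_ib_vport_rep_unload() in this patch.
	 */
	static struct net_device *rep_read_netdev(struct mlx5_ib_dev *ibdev,
						  int vport_index)
	{
		struct net_device *ndev;

		read_lock(&ibdev->port[vport_index].roce.netdev_lock);
		ndev = ibdev->port[vport_index].roce.netdev;
		if (ndev)
			dev_hold(ndev);	/* keep it valid after the lock is dropped */
		read_unlock(&ibdev->port[vport_index].roce.netdev_lock);

		return ndev;	/* NULL once the rep has been unloaded */
	}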
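
The last hunk also changes the contract of create_flow_rule_vport_sq(): instead of storing the rule in sq->flow_rule and returning an int, it now hands back the struct mlx5_flow_handle * itself, returning NULL when the device is not a representor and an ERR_PTR() on failure. Callers therefore take over that bookkeeping; a hypothetical caller sketch under that assumption (local names are illustrative, not from this patch):

	struct mlx5_flow_handle *flow_rule;

	flow_rule = create_flow_rule_vport_sq(dev, sq, port);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	/* NULL (not an error) when dev->is_rep is false; save for teardown. */
	sq->flow_rule = flow_rule;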