hc
2024-01-31 f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2
kernel/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -36,12 +36,11 @@
  * Global resources are common to all the netdevices crated on the same nic.
  */
 
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
-		     struct mlx5e_tir *tir, u32 *in, int inlen)
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
 {
 	int err;
 
-	err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+	err = mlx5_core_create_tir(mdev, in, &tir->tirn);
 	if (err)
 		return err;
 
@@ -61,6 +60,16 @@
 	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }
 
+void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
+{
+	bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
+	bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
+
+	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
+}
+
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
 			     struct mlx5_core_mkey *mkey)
 {
@@ -77,7 +86,7 @@
 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
 	MLX5_SET(mkc, mkc, lw, 1);
 	MLX5_SET(mkc, mkc, lr, 1);
-
+	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
 	MLX5_SET(mkc, mkc, pd, pdn);
 	MLX5_SET(mkc, mkc, length64, 1);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
@@ -142,10 +151,12 @@
 	memset(res, 0, sizeof(*res));
 }
 
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+		       bool enable_mc_lb)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_tir *tir;
+	u8 lb_flags = 0;
 	int err = 0;
 	u32 tirn = 0;
 	int inlen;
@@ -159,15 +170,20 @@
 	}
 
 	if (enable_uc_lb)
-		MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
-			 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+		lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+	if (enable_mc_lb)
+		lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+	if (lb_flags)
+		MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags);
 
 	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
 	mutex_lock(&mdev->mlx5e_res.td.list_lock);
 	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
 		tirn = tir->tirn;
-		err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+		err = mlx5_core_modify_tir(mdev, tirn, in);
 		if (err)
 			goto out;
 	}
@@ -179,16 +195,4 @@
 	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
 	return err;
-}
-
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
-{
-	u8 min_inline_mode;
-
-	mlx5_query_min_inline(mdev, &min_inline_mode);
-	if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
-	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
-		min_inline_mode = MLX5_INLINE_MODE_L2;
-
-	return min_inline_mode;
 }
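Caller impact (illustrative only): the hunks above change the signatures of mlx5e_create_tir() and mlx5e_refresh_tirs(), so existing call sites need a mechanical update. The sketch below is an assumption about how a typical caller would be adjusted; it is not part of this diff, and the surrounding context (err, priv, mdev, tir, in, inlen) is hypothetical.

	/* Hypothetical caller updates, assuming the new signatures above. */

	/* Old: err = mlx5e_refresh_tirs(priv, false);
	 * New: the multicast self-loopback flag is passed explicitly.
	 */
	err = mlx5e_refresh_tirs(priv, false, false);

	/* Old: err = mlx5e_create_tir(mdev, tir, in, inlen);
	 * New: the inlen argument is dropped; the core layer is presumed to
	 * derive the create_tir_in size from the fixed command layout.
	 */
	err = mlx5e_create_tir(mdev, tir, in);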