@@ -36,13 +36,6 @@
 #include "mlx5_core.h"
 #include "eswitch.h"
 
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
-	return !!sriov->num_vfs;
-}
-
 static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@@ -81,17 +74,10 @@
 	int err;
 	int vf;
 
-	if (sriov->enabled_vfs) {
-		mlx5_core_warn(dev,
-			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
-			       sriov->enabled_vfs);
-		return -EBUSY;
-	}
-
 	if (!MLX5_ESWITCH_MANAGER(dev))
 		goto enable_vfs_hca;
 
-	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+	err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
 	if (err) {
 		mlx5_core_warn(dev,
 			       "failed to enable eswitch SRIOV (%d)\n", err);
@@ -106,7 +92,6 @@
 			continue;
 		}
 		sriov->vfs_ctx[vf].enabled = 1;
-		sriov->enabled_vfs++;
 		if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
 			err = sriov_restore_guids(dev, vf);
 			if (err) {
@@ -122,16 +107,14 @@
 	return 0;
 }
 
-static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
+static void
+mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 	int err;
 	int vf;
 
-	if (!sriov->enabled_vfs)
-		goto out;
-
-	for (vf = 0; vf < sriov->num_vfs; vf++) {
+	for (vf = num_vfs - 1; vf >= 0; vf--) {
 		if (!sriov->vfs_ctx[vf].enabled)
 			continue;
 		err = mlx5_core_disable_hca(dev, vf + 1);
@@ -140,44 +123,19 @@
 			continue;
 		}
 		sriov->vfs_ctx[vf].enabled = 0;
-		sriov->enabled_vfs--;
 	}
 
-out:
 	if (MLX5_ESWITCH_MANAGER(dev))
-		mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+		mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
 
-	if (mlx5_wait_for_vf_pages(dev))
+	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
-}
-
-static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
-{
-	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	int err = 0;
-
-	if (pci_num_vf(pdev)) {
-		mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
-		return -EBUSY;
-	}
-
-	err = pci_enable_sriov(pdev, num_vfs);
-	if (err)
-		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-
-	return err;
-}
-
-static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
-{
-	pci_disable_sriov(pdev);
 }
 
 static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	int err = 0;
+	int err;
 
 	err = mlx5_device_enable_sriov(dev, num_vfs);
 	if (err) {
@@ -185,64 +143,48 @@
 		return err;
 	}
 
-	err = mlx5_pci_enable_sriov(pdev, num_vfs);
+	err = pci_enable_sriov(pdev, num_vfs);
 	if (err) {
-		mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
-		mlx5_device_disable_sriov(dev);
-		return err;
+		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
+		mlx5_device_disable_sriov(dev, num_vfs, true);
 	}
-
-	sriov->num_vfs = num_vfs;
-
-	return 0;
+	return err;
 }
 
 static void mlx5_sriov_disable(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+	int num_vfs = pci_num_vf(dev->pdev);
 
-	mlx5_pci_disable_sriov(pdev);
-	mlx5_device_disable_sriov(dev);
-	sriov->num_vfs = 0;
+	pci_disable_sriov(pdev);
+	mlx5_device_disable_sriov(dev, num_vfs, true);
 }
 
 int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 	int err = 0;
 
 	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
-	if (!mlx5_core_is_pf(dev))
-		return -EPERM;
 
-	if (num_vfs) {
-		int ret;
-
-		ret = mlx5_lag_forbid(dev);
-		if (ret && (ret != -ENODEV))
-			return ret;
-	}
-
-	if (num_vfs) {
+	if (num_vfs)
 		err = mlx5_sriov_enable(pdev, num_vfs);
-	} else {
+	else
 		mlx5_sriov_disable(pdev);
-		mlx5_lag_allow(dev);
-	}
 
+	if (!err)
+		sriov->num_vfs = num_vfs;
 	return err ? err : num_vfs;
 }
 
 int mlx5_sriov_attach(struct mlx5_core_dev *dev)
 {
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
-	if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+	if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
 		return 0;
 
 	/* If sriov VFs exist in PCI level, enable them in device level */
-	return mlx5_device_enable_sriov(dev, sriov->num_vfs);
+	return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
 }
 
 void mlx5_sriov_detach(struct mlx5_core_dev *dev)
@@ -250,7 +192,30 @@
 	if (!mlx5_core_is_pf(dev))
 		return;
 
-	mlx5_device_disable_sriov(dev);
+	mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
+}
+
+static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
+{
+	u16 host_total_vfs;
+	const u32 *out;
+
+	if (mlx5_core_is_ecpf_esw_manager(dev)) {
+		out = mlx5_esw_query_functions(dev);
+
+		/* Old FW doesn't support getting total_vfs from esw func
+		 * but supports getting it from pci_sriov.
+		 */
+		if (IS_ERR(out))
+			goto done;
+		host_total_vfs = MLX5_GET(query_esw_functions_out, out,
+					  host_params_context.host_total_vfs);
+		kvfree(out);
+		return host_total_vfs;
+	}
+
+done:
+	return pci_sriov_get_totalvfs(dev->pdev);
 }
 
 int mlx5_sriov_init(struct mlx5_core_dev *dev)
@@ -263,6 +228,7 @@
 		return 0;
 
 	total_vfs = pci_sriov_get_totalvfs(pdev);
+	sriov->max_vfs = mlx5_get_max_vfs(dev);
 	sriov->num_vfs = pci_num_vf(pdev);
 	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
 	if (!sriov->vfs_ctx)