.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /**************************************************************************/ |
---|
2 | 3 | /* */ |
---|
3 | 4 | /* IBM System i and System p Virtual NIC Device Driver */ |
---|
.. | .. |
---|
6 | 7 | /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */ |
---|
7 | 8 | /* John Allen (jallen@linux.vnet.ibm.com) */ |
---|
8 | 9 | /* */ |
---|
9 | | -/* This program is free software; you can redistribute it and/or modify */ |
---|
10 | | -/* it under the terms of the GNU General Public License as published by */ |
---|
11 | | -/* the Free Software Foundation; either version 2 of the License, or */ |
---|
12 | | -/* (at your option) any later version. */ |
---|
13 | | -/* */ |
---|
14 | | -/* This program is distributed in the hope that it will be useful, */ |
---|
15 | | -/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ |
---|
16 | | -/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ |
---|
17 | | -/* GNU General Public License for more details. */ |
---|
18 | | -/* */ |
---|
19 | | -/* You should have received a copy of the GNU General Public License */ |
---|
20 | | -/* along with this program. */ |
---|
21 | 10 | /* */ |
---|
22 | 11 | /* This module contains the implementation of a virtual ethernet device */ |
---|
23 | 12 | /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */ |
---|
.. | .. |
---|
108 | 97 | static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, |
---|
109 | 98 | struct ibmvnic_sub_crq_queue *); |
---|
110 | 99 | static int ibmvnic_poll(struct napi_struct *napi, int data); |
---|
111 | | -static void send_map_query(struct ibmvnic_adapter *adapter); |
---|
| 100 | +static void send_query_map(struct ibmvnic_adapter *adapter); |
---|
112 | 101 | static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); |
---|
113 | 102 | static int send_request_unmap(struct ibmvnic_adapter *, u8); |
---|
114 | 103 | static int send_login(struct ibmvnic_adapter *adapter); |
---|
115 | | -static void send_cap_queries(struct ibmvnic_adapter *adapter); |
---|
| 104 | +static void send_query_cap(struct ibmvnic_adapter *adapter); |
---|
116 | 105 | static int init_sub_crqs(struct ibmvnic_adapter *); |
---|
117 | 106 | static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); |
---|
118 | | -static int ibmvnic_init(struct ibmvnic_adapter *); |
---|
119 | | -static int ibmvnic_reset_init(struct ibmvnic_adapter *); |
---|
| 107 | +static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset); |
---|
120 | 108 | static void release_crq_queue(struct ibmvnic_adapter *); |
---|
121 | | -static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); |
---|
| 109 | +static int __ibmvnic_set_mac(struct net_device *, u8 *); |
---|
122 | 110 | static int init_crq_queue(struct ibmvnic_adapter *adapter); |
---|
| 111 | +static int send_query_phys_parms(struct ibmvnic_adapter *adapter); |
---|
123 | 112 | |
---|
124 | 113 | struct ibmvnic_stat { |
---|
125 | 114 | char name[ETH_GSTRING_LEN]; |
---|
.. | .. |
---|
128 | 117 | |
---|
129 | 118 | #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \ |
---|
130 | 119 | offsetof(struct ibmvnic_statistics, stat)) |
---|
131 | | -#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off))) |
---|
| 120 | +#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off)))) |
---|
132 | 121 | |
---|
133 | 122 | static const struct ibmvnic_stat ibmvnic_stats[] = { |
---|
134 | 123 | {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, |
---|
.. | .. |
---|
169 | 158 | return rc; |
---|
170 | 159 | } |
---|
171 | 160 | |
---|
| 161 | +/** |
---|
| 162 | + * ibmvnic_wait_for_completion - Check device state and wait for completion |
---|
| 163 | + * @adapter: private device data |
---|
| 164 | + * @comp_done: completion structure to wait for |
---|
| 165 | + * @timeout: time to wait in milliseconds |
---|
| 166 | + * |
---|
| 167 | + * Wait for a completion signal or until the timeout limit is reached |
---|
| 168 | + * while checking that the device is still active. |
---|
| 169 | + */ |
---|
| 170 | +static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, |
---|
| 171 | + struct completion *comp_done, |
---|
| 172 | + unsigned long timeout) |
---|
| 173 | +{ |
---|
| 174 | + struct net_device *netdev; |
---|
| 175 | + unsigned long div_timeout; |
---|
| 176 | + u8 retry; |
---|
| 177 | + |
---|
| 178 | + netdev = adapter->netdev; |
---|
| 179 | + retry = 5; |
---|
| 180 | + div_timeout = msecs_to_jiffies(timeout / retry); |
---|
| 181 | + while (true) { |
---|
| 182 | + if (!adapter->crq.active) { |
---|
| 183 | + netdev_err(netdev, "Device down!\n"); |
---|
| 184 | + return -ENODEV; |
---|
| 185 | + } |
---|
| 186 | + if (!retry--) |
---|
| 187 | + break; |
---|
| 188 | + if (wait_for_completion_timeout(comp_done, div_timeout)) |
---|
| 189 | + return 0; |
---|
| 190 | + } |
---|
| 191 | + netdev_err(netdev, "Operation timed out.\n"); |
---|
| 192 | + return -ETIMEDOUT; |
---|
| 193 | +} |
---|
| 194 | + |
---|
172 | 195 | static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, |
---|
173 | 196 | struct ibmvnic_long_term_buff *ltb, int size) |
---|
174 | 197 | { |
---|
.. | .. |
---|
186 | 209 | ltb->map_id = adapter->map_id; |
---|
187 | 210 | adapter->map_id++; |
---|
188 | 211 | |
---|
189 | | - init_completion(&adapter->fw_done); |
---|
190 | | - rc = send_request_map(adapter, ltb->addr, |
---|
191 | | - ltb->size, ltb->map_id); |
---|
| 212 | + mutex_lock(&adapter->fw_lock); |
---|
| 213 | + adapter->fw_done_rc = 0; |
---|
| 214 | + reinit_completion(&adapter->fw_done); |
---|
| 215 | + |
---|
| 216 | + rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); |
---|
192 | 217 | if (rc) { |
---|
193 | | - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); |
---|
194 | | - return rc; |
---|
| 218 | + dev_err(dev, "send_request_map failed, rc = %d\n", rc); |
---|
| 219 | + goto out; |
---|
195 | 220 | } |
---|
196 | | - wait_for_completion(&adapter->fw_done); |
---|
| 221 | + |
---|
| 222 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
---|
| 223 | + if (rc) { |
---|
| 224 | + dev_err(dev, |
---|
| 225 | + "Long term map request aborted or timed out,rc = %d\n", |
---|
| 226 | + rc); |
---|
| 227 | + goto out; |
---|
| 228 | + } |
---|
197 | 229 | |
---|
198 | 230 | if (adapter->fw_done_rc) { |
---|
199 | 231 | dev_err(dev, "Couldn't map long term buffer,rc = %d\n", |
---|
200 | 232 | adapter->fw_done_rc); |
---|
201 | | - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); |
---|
202 | | - return -1; |
---|
| 233 | + rc = -1; |
---|
| 234 | + goto out; |
---|
203 | 235 | } |
---|
204 | | - return 0; |
---|
| 236 | + rc = 0; |
---|
| 237 | +out: |
---|
| 238 | + if (rc) { |
---|
| 239 | + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); |
---|
| 240 | + ltb->buff = NULL; |
---|
| 241 | + } |
---|
| 242 | + mutex_unlock(&adapter->fw_lock); |
---|
| 243 | + return rc; |
---|
205 | 244 | } |
---|
206 | 245 | |
---|
207 | 246 | static void free_long_term_buff(struct ibmvnic_adapter *adapter, |
---|
.. | .. |
---|
221 | 260 | adapter->reset_reason != VNIC_RESET_TIMEOUT) |
---|
222 | 261 | send_request_unmap(adapter, ltb->map_id); |
---|
223 | 262 | dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); |
---|
| 263 | + ltb->buff = NULL; |
---|
| 264 | + ltb->map_id = 0; |
---|
224 | 265 | } |
---|
225 | 266 | |
---|
226 | 267 | static int reset_long_term_buff(struct ibmvnic_adapter *adapter, |
---|
227 | 268 | struct ibmvnic_long_term_buff *ltb) |
---|
228 | 269 | { |
---|
| 270 | + struct device *dev = &adapter->vdev->dev; |
---|
229 | 271 | int rc; |
---|
230 | 272 | |
---|
231 | 273 | memset(ltb->buff, 0, ltb->size); |
---|
232 | 274 | |
---|
233 | | - init_completion(&adapter->fw_done); |
---|
| 275 | + mutex_lock(&adapter->fw_lock); |
---|
| 276 | + adapter->fw_done_rc = 0; |
---|
| 277 | + |
---|
| 278 | + reinit_completion(&adapter->fw_done); |
---|
234 | 279 | rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); |
---|
235 | | - if (rc) |
---|
| 280 | + if (rc) { |
---|
| 281 | + mutex_unlock(&adapter->fw_lock); |
---|
236 | 282 | return rc; |
---|
237 | | - wait_for_completion(&adapter->fw_done); |
---|
| 283 | + } |
---|
| 284 | + |
---|
| 285 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
---|
| 286 | + if (rc) { |
---|
| 287 | + dev_info(dev, |
---|
| 288 | + "Reset failed, long term map request timed out or aborted\n"); |
---|
| 289 | + mutex_unlock(&adapter->fw_lock); |
---|
| 290 | + return rc; |
---|
| 291 | + } |
---|
238 | 292 | |
---|
239 | 293 | if (adapter->fw_done_rc) { |
---|
240 | | - dev_info(&adapter->vdev->dev, |
---|
| 294 | + dev_info(dev, |
---|
241 | 295 | "Reset failed, attempting to free and reallocate buffer\n"); |
---|
242 | 296 | free_long_term_buff(adapter, ltb); |
---|
| 297 | + mutex_unlock(&adapter->fw_lock); |
---|
243 | 298 | return alloc_long_term_buff(adapter, ltb, ltb->size); |
---|
244 | 299 | } |
---|
| 300 | + mutex_unlock(&adapter->fw_lock); |
---|
245 | 301 | return 0; |
---|
246 | 302 | } |
---|
247 | 303 | |
---|
.. | .. |
---|
249 | 305 | { |
---|
250 | 306 | int i; |
---|
251 | 307 | |
---|
252 | | - for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
---|
253 | | - i++) |
---|
| 308 | + for (i = 0; i < adapter->num_active_rx_pools; i++) |
---|
254 | 309 | adapter->rx_pool[i].active = 0; |
---|
255 | 310 | } |
---|
256 | 311 | |
---|
.. | .. |
---|
258 | 313 | struct ibmvnic_rx_pool *pool) |
---|
259 | 314 | { |
---|
260 | 315 | int count = pool->size - atomic_read(&pool->available); |
---|
| 316 | + u64 handle = adapter->rx_scrq[pool->index]->handle; |
---|
261 | 317 | struct device *dev = &adapter->vdev->dev; |
---|
262 | 318 | int buffers_added = 0; |
---|
263 | 319 | unsigned long lpar_rc; |
---|
.. | .. |
---|
266 | 322 | unsigned int offset; |
---|
267 | 323 | dma_addr_t dma_addr; |
---|
268 | 324 | unsigned char *dst; |
---|
269 | | - u64 *handle_array; |
---|
270 | 325 | int shift = 0; |
---|
271 | 326 | int index; |
---|
272 | 327 | int i; |
---|
273 | 328 | |
---|
274 | 329 | if (!pool->active) |
---|
275 | 330 | return; |
---|
276 | | - |
---|
277 | | - handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
---|
278 | | - be32_to_cpu(adapter->login_rsp_buf-> |
---|
279 | | - off_rxadd_subcrqs)); |
---|
280 | 331 | |
---|
281 | 332 | for (i = 0; i < count; ++i) { |
---|
282 | 333 | skb = alloc_skb(pool->buff_size, GFP_ATOMIC); |
---|
.. | .. |
---|
321 | 372 | #endif |
---|
322 | 373 | sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift); |
---|
323 | 374 | |
---|
324 | | - lpar_rc = send_subcrq(adapter, handle_array[pool->index], |
---|
325 | | - &sub_crq); |
---|
| 375 | + lpar_rc = send_subcrq(adapter, handle, &sub_crq); |
---|
326 | 376 | if (lpar_rc != H_SUCCESS) |
---|
327 | 377 | goto failure; |
---|
328 | 378 | |
---|
.. | .. |
---|
359 | 409 | int i; |
---|
360 | 410 | |
---|
361 | 411 | adapter->replenish_task_cycles++; |
---|
362 | | - for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
---|
363 | | - i++) { |
---|
| 412 | + for (i = 0; i < adapter->num_active_rx_pools; i++) { |
---|
364 | 413 | if (adapter->rx_pool[i].active) |
---|
365 | 414 | replenish_rx_pool(adapter, &adapter->rx_pool[i]); |
---|
366 | 415 | } |
---|
| 416 | + |
---|
| 417 | + netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); |
---|
367 | 418 | } |
---|
368 | 419 | |
---|
369 | 420 | static void release_stats_buffers(struct ibmvnic_adapter *adapter) |
---|
.. | .. |
---|
427 | 478 | static int reset_rx_pools(struct ibmvnic_adapter *adapter) |
---|
428 | 479 | { |
---|
429 | 480 | struct ibmvnic_rx_pool *rx_pool; |
---|
| 481 | + u64 buff_size; |
---|
430 | 482 | int rx_scrqs; |
---|
431 | 483 | int i, j, rc; |
---|
432 | | - u64 *size_array; |
---|
433 | 484 | |
---|
434 | | - size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
---|
435 | | - be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); |
---|
| 485 | + if (!adapter->rx_pool) |
---|
| 486 | + return -1; |
---|
436 | 487 | |
---|
437 | | - rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
---|
| 488 | + buff_size = adapter->cur_rx_buf_sz; |
---|
| 489 | + rx_scrqs = adapter->num_active_rx_pools; |
---|
438 | 490 | for (i = 0; i < rx_scrqs; i++) { |
---|
439 | 491 | rx_pool = &adapter->rx_pool[i]; |
---|
440 | 492 | |
---|
441 | 493 | netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); |
---|
442 | 494 | |
---|
443 | | - if (rx_pool->buff_size != be64_to_cpu(size_array[i])) { |
---|
| 495 | + if (rx_pool->buff_size != buff_size) { |
---|
444 | 496 | free_long_term_buff(adapter, &rx_pool->long_term_buff); |
---|
445 | | - rx_pool->buff_size = be64_to_cpu(size_array[i]); |
---|
| 497 | + rx_pool->buff_size = buff_size; |
---|
446 | 498 | rc = alloc_long_term_buff(adapter, |
---|
447 | 499 | &rx_pool->long_term_buff, |
---|
448 | 500 | rx_pool->size * |
---|
.. | .. |
---|
510 | 562 | struct device *dev = &adapter->vdev->dev; |
---|
511 | 563 | struct ibmvnic_rx_pool *rx_pool; |
---|
512 | 564 | int rxadd_subcrqs; |
---|
513 | | - u64 *size_array; |
---|
| 565 | + u64 buff_size; |
---|
514 | 566 | int i, j; |
---|
515 | 567 | |
---|
516 | | - rxadd_subcrqs = |
---|
517 | | - be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
---|
518 | | - size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
---|
519 | | - be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); |
---|
| 568 | + rxadd_subcrqs = adapter->num_active_rx_scrqs; |
---|
| 569 | + buff_size = adapter->cur_rx_buf_sz; |
---|
520 | 570 | |
---|
521 | 571 | adapter->rx_pool = kcalloc(rxadd_subcrqs, |
---|
522 | 572 | sizeof(struct ibmvnic_rx_pool), |
---|
.. | .. |
---|
534 | 584 | netdev_dbg(adapter->netdev, |
---|
535 | 585 | "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", |
---|
536 | 586 | i, adapter->req_rx_add_entries_per_subcrq, |
---|
537 | | - be64_to_cpu(size_array[i])); |
---|
| 587 | + buff_size); |
---|
538 | 588 | |
---|
539 | 589 | rx_pool->size = adapter->req_rx_add_entries_per_subcrq; |
---|
540 | 590 | rx_pool->index = i; |
---|
541 | | - rx_pool->buff_size = be64_to_cpu(size_array[i]); |
---|
| 591 | + rx_pool->buff_size = buff_size; |
---|
542 | 592 | rx_pool->active = 1; |
---|
543 | 593 | |
---|
544 | 594 | rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), |
---|
.. | .. |
---|
601 | 651 | int tx_scrqs; |
---|
602 | 652 | int i, rc; |
---|
603 | 653 | |
---|
604 | | - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); |
---|
| 654 | + if (!adapter->tx_pool) |
---|
| 655 | + return -1; |
---|
| 656 | + |
---|
| 657 | + tx_scrqs = adapter->num_active_tx_pools; |
---|
605 | 658 | for (i = 0; i < tx_scrqs; i++) { |
---|
606 | 659 | rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); |
---|
607 | 660 | if (rc) |
---|
.. | .. |
---|
690 | 743 | int tx_subcrqs; |
---|
691 | 744 | int i, rc; |
---|
692 | 745 | |
---|
693 | | - tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); |
---|
| 746 | + tx_subcrqs = adapter->num_active_tx_scrqs; |
---|
694 | 747 | adapter->tx_pool = kcalloc(tx_subcrqs, |
---|
695 | 748 | sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); |
---|
696 | 749 | if (!adapter->tx_pool) |
---|
.. | .. |
---|
782 | 835 | return; |
---|
783 | 836 | |
---|
784 | 837 | for (i = 0; i < adapter->num_active_rx_napi; i++) { |
---|
785 | | - if (&adapter->napi[i]) { |
---|
786 | | - netdev_dbg(adapter->netdev, |
---|
787 | | - "Releasing napi[%d]\n", i); |
---|
788 | | - netif_napi_del(&adapter->napi[i]); |
---|
789 | | - } |
---|
| 838 | + netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); |
---|
| 839 | + netif_napi_del(&adapter->napi[i]); |
---|
790 | 840 | } |
---|
791 | 841 | |
---|
792 | 842 | kfree(adapter->napi); |
---|
.. | .. |
---|
798 | 848 | static int ibmvnic_login(struct net_device *netdev) |
---|
799 | 849 | { |
---|
800 | 850 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
---|
801 | | - unsigned long timeout = msecs_to_jiffies(30000); |
---|
| 851 | + unsigned long timeout = msecs_to_jiffies(20000); |
---|
802 | 852 | int retry_count = 0; |
---|
803 | 853 | int retries = 10; |
---|
804 | 854 | bool retry; |
---|
.. | .. |
---|
814 | 864 | adapter->init_done_rc = 0; |
---|
815 | 865 | reinit_completion(&adapter->init_done); |
---|
816 | 866 | rc = send_login(adapter); |
---|
817 | | - if (rc) { |
---|
818 | | - netdev_warn(netdev, "Unable to login\n"); |
---|
| 867 | + if (rc) |
---|
819 | 868 | return rc; |
---|
820 | | - } |
---|
821 | 869 | |
---|
822 | 870 | if (!wait_for_completion_timeout(&adapter->init_done, |
---|
823 | 871 | timeout)) { |
---|
.. | .. |
---|
846 | 894 | "Received partial success, retrying...\n"); |
---|
847 | 895 | adapter->init_done_rc = 0; |
---|
848 | 896 | reinit_completion(&adapter->init_done); |
---|
849 | | - send_cap_queries(adapter); |
---|
| 897 | + send_query_cap(adapter); |
---|
850 | 898 | if (!wait_for_completion_timeout(&adapter->init_done, |
---|
851 | 899 | timeout)) { |
---|
852 | 900 | netdev_warn(netdev, |
---|
.. | .. |
---|
873 | 921 | } |
---|
874 | 922 | } while (retry); |
---|
875 | 923 | |
---|
876 | | - /* handle pending MAC address changes after successful login */ |
---|
877 | | - if (adapter->mac_change_pending) { |
---|
878 | | - __ibmvnic_set_mac(netdev, &adapter->desired.mac); |
---|
879 | | - adapter->mac_change_pending = false; |
---|
880 | | - } |
---|
| 924 | + __ibmvnic_set_mac(netdev, adapter->mac_addr); |
---|
881 | 925 | |
---|
| 926 | + netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state); |
---|
882 | 927 | return 0; |
---|
883 | 928 | } |
---|
884 | 929 | |
---|
885 | 930 | static void release_login_buffer(struct ibmvnic_adapter *adapter) |
---|
886 | 931 | { |
---|
| 932 | + if (!adapter->login_buf) |
---|
| 933 | + return; |
---|
| 934 | + |
---|
| 935 | + dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token, |
---|
| 936 | + adapter->login_buf_sz, DMA_TO_DEVICE); |
---|
887 | 937 | kfree(adapter->login_buf); |
---|
888 | 938 | adapter->login_buf = NULL; |
---|
889 | 939 | } |
---|
890 | 940 | |
---|
891 | 941 | static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) |
---|
892 | 942 | { |
---|
| 943 | + if (!adapter->login_rsp_buf) |
---|
| 944 | + return; |
---|
| 945 | + |
---|
| 946 | + dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token, |
---|
| 947 | + adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); |
---|
893 | 948 | kfree(adapter->login_rsp_buf); |
---|
894 | 949 | adapter->login_rsp_buf = NULL; |
---|
895 | 950 | } |
---|
.. | .. |
---|
902 | 957 | release_rx_pools(adapter); |
---|
903 | 958 | |
---|
904 | 959 | release_napi(adapter); |
---|
| 960 | + release_login_buffer(adapter); |
---|
905 | 961 | release_login_rsp_buffer(adapter); |
---|
906 | 962 | } |
---|
907 | 963 | |
---|
908 | 964 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) |
---|
909 | 965 | { |
---|
910 | 966 | struct net_device *netdev = adapter->netdev; |
---|
911 | | - unsigned long timeout = msecs_to_jiffies(30000); |
---|
| 967 | + unsigned long timeout = msecs_to_jiffies(20000); |
---|
912 | 968 | union ibmvnic_crq crq; |
---|
913 | 969 | bool resend; |
---|
914 | 970 | int rc; |
---|
.. | .. |
---|
936 | 992 | return -1; |
---|
937 | 993 | } |
---|
938 | 994 | |
---|
939 | | - if (adapter->init_done_rc == 1) { |
---|
| 995 | + if (adapter->init_done_rc == PARTIALSUCCESS) { |
---|
940 | 996 | /* Partuial success, delay and re-send */ |
---|
941 | 997 | mdelay(1000); |
---|
942 | 998 | resend = true; |
---|
.. | .. |
---|
981 | 1037 | if (adapter->vpd->buff) |
---|
982 | 1038 | len = adapter->vpd->len; |
---|
983 | 1039 | |
---|
984 | | - init_completion(&adapter->fw_done); |
---|
| 1040 | + mutex_lock(&adapter->fw_lock); |
---|
| 1041 | + adapter->fw_done_rc = 0; |
---|
| 1042 | + reinit_completion(&adapter->fw_done); |
---|
| 1043 | + |
---|
985 | 1044 | crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; |
---|
986 | 1045 | crq.get_vpd_size.cmd = GET_VPD_SIZE; |
---|
987 | 1046 | rc = ibmvnic_send_crq(adapter, &crq); |
---|
988 | | - if (rc) |
---|
| 1047 | + if (rc) { |
---|
| 1048 | + mutex_unlock(&adapter->fw_lock); |
---|
989 | 1049 | return rc; |
---|
990 | | - wait_for_completion(&adapter->fw_done); |
---|
| 1050 | + } |
---|
| 1051 | + |
---|
| 1052 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
---|
| 1053 | + if (rc) { |
---|
| 1054 | + dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc); |
---|
| 1055 | + mutex_unlock(&adapter->fw_lock); |
---|
| 1056 | + return rc; |
---|
| 1057 | + } |
---|
| 1058 | + mutex_unlock(&adapter->fw_lock); |
---|
991 | 1059 | |
---|
992 | 1060 | if (!adapter->vpd->len) |
---|
993 | 1061 | return -ENODATA; |
---|
.. | .. |
---|
1014 | 1082 | return -ENOMEM; |
---|
1015 | 1083 | } |
---|
1016 | 1084 | |
---|
| 1085 | + mutex_lock(&adapter->fw_lock); |
---|
| 1086 | + adapter->fw_done_rc = 0; |
---|
1017 | 1087 | reinit_completion(&adapter->fw_done); |
---|
| 1088 | + |
---|
1018 | 1089 | crq.get_vpd.first = IBMVNIC_CRQ_CMD; |
---|
1019 | 1090 | crq.get_vpd.cmd = GET_VPD; |
---|
1020 | 1091 | crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); |
---|
.. | .. |
---|
1023 | 1094 | if (rc) { |
---|
1024 | 1095 | kfree(adapter->vpd->buff); |
---|
1025 | 1096 | adapter->vpd->buff = NULL; |
---|
| 1097 | + mutex_unlock(&adapter->fw_lock); |
---|
1026 | 1098 | return rc; |
---|
1027 | 1099 | } |
---|
1028 | | - wait_for_completion(&adapter->fw_done); |
---|
1029 | 1100 | |
---|
| 1101 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
---|
| 1102 | + if (rc) { |
---|
| 1103 | + dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc); |
---|
| 1104 | + kfree(adapter->vpd->buff); |
---|
| 1105 | + adapter->vpd->buff = NULL; |
---|
| 1106 | + mutex_unlock(&adapter->fw_lock); |
---|
| 1107 | + return rc; |
---|
| 1108 | + } |
---|
| 1109 | + |
---|
| 1110 | + mutex_unlock(&adapter->fw_lock); |
---|
1030 | 1111 | return 0; |
---|
1031 | 1112 | } |
---|
1032 | 1113 | |
---|
.. | .. |
---|
1056 | 1137 | if (rc) |
---|
1057 | 1138 | return rc; |
---|
1058 | 1139 | |
---|
1059 | | - send_map_query(adapter); |
---|
| 1140 | + send_query_map(adapter); |
---|
1060 | 1141 | |
---|
1061 | 1142 | rc = init_rx_pools(netdev); |
---|
1062 | 1143 | if (rc) |
---|
.. | .. |
---|
1127 | 1208 | if (adapter->state != VNIC_CLOSED) { |
---|
1128 | 1209 | rc = ibmvnic_login(netdev); |
---|
1129 | 1210 | if (rc) |
---|
1130 | | - return rc; |
---|
| 1211 | + goto out; |
---|
1131 | 1212 | |
---|
1132 | 1213 | rc = init_resources(adapter); |
---|
1133 | 1214 | if (rc) { |
---|
1134 | 1215 | netdev_err(netdev, "failed to initialize resources\n"); |
---|
1135 | 1216 | release_resources(adapter); |
---|
1136 | | - return rc; |
---|
| 1217 | + goto out; |
---|
1137 | 1218 | } |
---|
1138 | 1219 | } |
---|
1139 | 1220 | |
---|
1140 | 1221 | rc = __ibmvnic_open(netdev); |
---|
1141 | | - netif_carrier_on(netdev); |
---|
1142 | 1222 | |
---|
| 1223 | +out: |
---|
| 1224 | + /* |
---|
| 1225 | + * If open fails due to a pending failover, set device state and |
---|
| 1226 | + * return. Device operation will be handled by reset routine. |
---|
| 1227 | + */ |
---|
| 1228 | + if (rc && adapter->failover_pending) { |
---|
| 1229 | + adapter->state = VNIC_OPEN; |
---|
| 1230 | + rc = 0; |
---|
| 1231 | + } |
---|
1143 | 1232 | return rc; |
---|
1144 | 1233 | } |
---|
1145 | 1234 | |
---|
.. | .. |
---|
1245 | 1334 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
---|
1246 | 1335 | |
---|
1247 | 1336 | /* ensure that transmissions are stopped if called by do_reset */ |
---|
1248 | | - if (adapter->resetting) |
---|
| 1337 | + if (test_bit(0, &adapter->resetting)) |
---|
1249 | 1338 | netif_tx_disable(netdev); |
---|
1250 | 1339 | else |
---|
1251 | 1340 | netif_tx_stop_all_queues(netdev); |
---|
.. | .. |
---|
1272 | 1361 | { |
---|
1273 | 1362 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
---|
1274 | 1363 | int rc; |
---|
| 1364 | + |
---|
| 1365 | + netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n", |
---|
| 1366 | + adapter->state, adapter->failover_pending, |
---|
| 1367 | + adapter->force_reset_recovery); |
---|
1275 | 1368 | |
---|
1276 | 1369 | /* If device failover is pending, just set device state and return. |
---|
1277 | 1370 | * Device operation will be handled by reset routine. |
---|
.. | .. |
---|
1459 | 1552 | unsigned int offset; |
---|
1460 | 1553 | int num_entries = 1; |
---|
1461 | 1554 | unsigned char *dst; |
---|
1462 | | - u64 *handle_array; |
---|
1463 | 1555 | int index = 0; |
---|
1464 | 1556 | u8 proto = 0; |
---|
| 1557 | + u64 handle; |
---|
1465 | 1558 | netdev_tx_t ret = NETDEV_TX_OK; |
---|
1466 | 1559 | |
---|
1467 | | - if (adapter->resetting) { |
---|
1468 | | - if (!netif_subqueue_stopped(netdev, skb)) |
---|
1469 | | - netif_stop_subqueue(netdev, queue_num); |
---|
| 1560 | + if (test_bit(0, &adapter->resetting)) { |
---|
1470 | 1561 | dev_kfree_skb_any(skb); |
---|
1471 | 1562 | |
---|
1472 | 1563 | tx_send_failed++; |
---|
.. | .. |
---|
1488 | 1579 | |
---|
1489 | 1580 | tx_scrq = adapter->tx_scrq[queue_num]; |
---|
1490 | 1581 | txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); |
---|
1491 | | - handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
---|
1492 | | - be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); |
---|
| 1582 | + handle = tx_scrq->handle; |
---|
1493 | 1583 | |
---|
1494 | 1584 | index = tx_pool->free_map[tx_pool->consumer_index]; |
---|
1495 | 1585 | |
---|
.. | .. |
---|
1521 | 1611 | |
---|
1522 | 1612 | memcpy(dst + cur, |
---|
1523 | 1613 | page_address(skb_frag_page(frag)) + |
---|
1524 | | - frag->page_offset, skb_frag_size(frag)); |
---|
| 1614 | + skb_frag_off(frag), skb_frag_size(frag)); |
---|
1525 | 1615 | cur += skb_frag_size(frag); |
---|
1526 | 1616 | } |
---|
1527 | 1617 | } else { |
---|
.. | .. |
---|
1604 | 1694 | ret = NETDEV_TX_OK; |
---|
1605 | 1695 | goto tx_err_out; |
---|
1606 | 1696 | } |
---|
1607 | | - lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], |
---|
| 1697 | + lpar_rc = send_subcrq_indirect(adapter, handle, |
---|
1608 | 1698 | (u64)tx_buff->indir_dma, |
---|
1609 | 1699 | (u64)num_entries); |
---|
1610 | 1700 | dma_unmap_single(dev, tx_buff->indir_dma, |
---|
1611 | 1701 | sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); |
---|
1612 | 1702 | } else { |
---|
1613 | 1703 | tx_buff->num_entries = num_entries; |
---|
1614 | | - lpar_rc = send_subcrq(adapter, handle_array[queue_num], |
---|
| 1704 | + lpar_rc = send_subcrq(adapter, handle, |
---|
1615 | 1705 | &tx_crq); |
---|
1616 | 1706 | } |
---|
1617 | 1707 | if (lpar_rc != H_SUCCESS) { |
---|
.. | .. |
---|
1712 | 1802 | } |
---|
1713 | 1803 | } |
---|
1714 | 1804 | |
---|
1715 | | -static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) |
---|
| 1805 | +static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) |
---|
1716 | 1806 | { |
---|
1717 | 1807 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
---|
1718 | | - struct sockaddr *addr = p; |
---|
1719 | 1808 | union ibmvnic_crq crq; |
---|
1720 | 1809 | int rc; |
---|
1721 | 1810 | |
---|
1722 | | - if (!is_valid_ether_addr(addr->sa_data)) |
---|
1723 | | - return -EADDRNOTAVAIL; |
---|
| 1811 | + if (!is_valid_ether_addr(dev_addr)) { |
---|
| 1812 | + rc = -EADDRNOTAVAIL; |
---|
| 1813 | + goto err; |
---|
| 1814 | + } |
---|
1724 | 1815 | |
---|
1725 | 1816 | memset(&crq, 0, sizeof(crq)); |
---|
1726 | 1817 | crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; |
---|
1727 | 1818 | crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; |
---|
1728 | | - ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); |
---|
| 1819 | + ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); |
---|
1729 | 1820 | |
---|
1730 | | - init_completion(&adapter->fw_done); |
---|
| 1821 | + mutex_lock(&adapter->fw_lock); |
---|
| 1822 | + adapter->fw_done_rc = 0; |
---|
| 1823 | + reinit_completion(&adapter->fw_done); |
---|
| 1824 | + |
---|
1731 | 1825 | rc = ibmvnic_send_crq(adapter, &crq); |
---|
1732 | | - if (rc) |
---|
1733 | | - return rc; |
---|
1734 | | - wait_for_completion(&adapter->fw_done); |
---|
| 1826 | + if (rc) { |
---|
| 1827 | + rc = -EIO; |
---|
| 1828 | + mutex_unlock(&adapter->fw_lock); |
---|
| 1829 | + goto err; |
---|
| 1830 | + } |
---|
| 1831 | + |
---|
| 1832 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
---|
1735 | 1833 | /* netdev->dev_addr is changed in handle_change_mac_rsp function */ |
---|
1736 | | - return adapter->fw_done_rc ? -EIO : 0; |
---|
| 1834 | + if (rc || adapter->fw_done_rc) { |
---|
| 1835 | + rc = -EIO; |
---|
| 1836 | + mutex_unlock(&adapter->fw_lock); |
---|
| 1837 | + goto err; |
---|
| 1838 | + } |
---|
| 1839 | + mutex_unlock(&adapter->fw_lock); |
---|
| 1840 | + return 0; |
---|
| 1841 | +err: |
---|
| 1842 | + ether_addr_copy(adapter->mac_addr, netdev->dev_addr); |
---|
| 1843 | + return rc; |
---|
1737 | 1844 | } |
---|
1738 | 1845 | |
---|
1739 | 1846 | static int ibmvnic_set_mac(struct net_device *netdev, void *p) |
---|
.. | .. |
---|
1742 | 1849 | struct sockaddr *addr = p; |
---|
1743 | 1850 | int rc; |
---|
1744 | 1851 | |
---|
1745 | | - if (adapter->state == VNIC_PROBED) { |
---|
1746 | | - memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); |
---|
1747 | | - adapter->mac_change_pending = true; |
---|
1748 | | - return 0; |
---|
| 1852 | + rc = 0; |
---|
| 1853 | + if (!is_valid_ether_addr(addr->sa_data)) |
---|
| 1854 | + return -EADDRNOTAVAIL; |
---|
| 1855 | + |
---|
| 1856 | + ether_addr_copy(adapter->mac_addr, addr->sa_data); |
---|
| 1857 | + if (adapter->state != VNIC_PROBED) |
---|
| 1858 | + rc = __ibmvnic_set_mac(netdev, addr->sa_data); |
---|
| 1859 | + |
---|
| 1860 | + return rc; |
---|
| 1861 | +} |
---|
| 1862 | + |
---|
| 1863 | +/** |
---|
| 1864 | + * do_change_param_reset returns zero if we are able to keep processing reset |
---|
| 1865 | + * events, or non-zero if we hit a fatal error and must halt. |
---|
| 1866 | + */ |
---|
| 1867 | +static int do_change_param_reset(struct ibmvnic_adapter *adapter, |
---|
| 1868 | + struct ibmvnic_rwi *rwi, |
---|
| 1869 | + u32 reset_state) |
---|
| 1870 | +{ |
---|
| 1871 | + struct net_device *netdev = adapter->netdev; |
---|
| 1872 | + int i, rc; |
---|
| 1873 | + |
---|
| 1874 | + netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n", |
---|
| 1875 | + rwi->reset_reason); |
---|
| 1876 | + |
---|
| 1877 | + netif_carrier_off(netdev); |
---|
| 1878 | + adapter->reset_reason = rwi->reset_reason; |
---|
| 1879 | + |
---|
| 1880 | + ibmvnic_cleanup(netdev); |
---|
| 1881 | + |
---|
| 1882 | + if (reset_state == VNIC_OPEN) { |
---|
| 1883 | + rc = __ibmvnic_close(netdev); |
---|
| 1884 | + if (rc) |
---|
| 1885 | + goto out; |
---|
1749 | 1886 | } |
---|
1750 | 1887 | |
---|
1751 | | - rc = __ibmvnic_set_mac(netdev, addr); |
---|
| 1888 | + release_resources(adapter); |
---|
| 1889 | + release_sub_crqs(adapter, 1); |
---|
| 1890 | + release_crq_queue(adapter); |
---|
1752 | 1891 | |
---|
| 1892 | + adapter->state = VNIC_PROBED; |
---|
| 1893 | + |
---|
| 1894 | + rc = init_crq_queue(adapter); |
---|
| 1895 | + |
---|
| 1896 | + if (rc) { |
---|
| 1897 | + netdev_err(adapter->netdev, |
---|
| 1898 | + "Couldn't initialize crq. rc=%d\n", rc); |
---|
| 1899 | + return rc; |
---|
| 1900 | + } |
---|
| 1901 | + |
---|
| 1902 | + rc = ibmvnic_reset_init(adapter, true); |
---|
| 1903 | + if (rc) { |
---|
| 1904 | + rc = IBMVNIC_INIT_FAILED; |
---|
| 1905 | + goto out; |
---|
| 1906 | + } |
---|
| 1907 | + |
---|
| 1908 | + /* If the adapter was in PROBE state prior to the reset, |
---|
| 1909 | + * exit here. |
---|
| 1910 | + */ |
---|
| 1911 | + if (reset_state == VNIC_PROBED) |
---|
| 1912 | + goto out; |
---|
| 1913 | + |
---|
| 1914 | + rc = ibmvnic_login(netdev); |
---|
| 1915 | + if (rc) { |
---|
| 1916 | + goto out; |
---|
| 1917 | + } |
---|
| 1918 | + |
---|
| 1919 | + rc = init_resources(adapter); |
---|
| 1920 | + if (rc) |
---|
| 1921 | + goto out; |
---|
| 1922 | + |
---|
| 1923 | + ibmvnic_disable_irqs(adapter); |
---|
| 1924 | + |
---|
| 1925 | + adapter->state = VNIC_CLOSED; |
---|
| 1926 | + |
---|
| 1927 | + if (reset_state == VNIC_CLOSED) |
---|
| 1928 | + return 0; |
---|
| 1929 | + |
---|
| 1930 | + rc = __ibmvnic_open(netdev); |
---|
| 1931 | + if (rc) { |
---|
| 1932 | + rc = IBMVNIC_OPEN_FAILED; |
---|
| 1933 | + goto out; |
---|
| 1934 | + } |
---|
| 1935 | + |
---|
| 1936 | + /* refresh device's multicast list */ |
---|
| 1937 | + ibmvnic_set_multi(netdev); |
---|
| 1938 | + |
---|
| 1939 | + /* kick napi */ |
---|
| 1940 | + for (i = 0; i < adapter->req_rx_queues; i++) |
---|
| 1941 | + napi_schedule(&adapter->napi[i]); |
---|
| 1942 | + |
---|
| 1943 | +out: |
---|
| 1944 | + if (rc) |
---|
| 1945 | + adapter->state = reset_state; |
---|
1753 | 1946 | return rc; |
---|
1754 | 1947 | } |
---|
1755 | 1948 | |
---|
.. | .. |
---|
1765 | 1958 | struct net_device *netdev = adapter->netdev; |
---|
1766 | 1959 | int rc; |
---|
1767 | 1960 | |
---|
1768 | | - netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", |
---|
1769 | | - rwi->reset_reason); |
---|
| 1961 | + netdev_dbg(adapter->netdev, |
---|
| 1962 | + "[S:%d FOP:%d] Reset reason %d, reset_state %d\n", |
---|
| 1963 | + adapter->state, adapter->failover_pending, |
---|
| 1964 | + rwi->reset_reason, reset_state); |
---|
| 1965 | + |
---|
| 1966 | + rtnl_lock(); |
---|
| 1967 | + /* |
---|
| 1968 | + * Now that we have the rtnl lock, clear any pending failover. |
---|
| 1969 | + * This will ensure ibmvnic_open() has either completed or will |
---|
| 1970 | + * block until failover is complete. |
---|
| 1971 | + */ |
---|
| 1972 | + if (rwi->reset_reason == VNIC_RESET_FAILOVER) |
---|
| 1973 | + adapter->failover_pending = false; |
---|
1770 | 1974 | |
---|
1771 | 1975 | netif_carrier_off(netdev); |
---|
1772 | 1976 | adapter->reset_reason = rwi->reset_reason; |
---|
.. | .. |
---|
1781 | 1985 | if (reset_state == VNIC_OPEN && |
---|
1782 | 1986 | adapter->reset_reason != VNIC_RESET_MOBILITY && |
---|
1783 | 1987 | adapter->reset_reason != VNIC_RESET_FAILOVER) { |
---|
1784 | | - rc = __ibmvnic_close(netdev); |
---|
1785 | | - if (rc) |
---|
1786 | | - return rc; |
---|
1787 | | - } |
---|
| 1988 | + adapter->state = VNIC_CLOSING; |
---|
1788 | 1989 | |
---|
1789 | | - if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || |
---|
1790 | | - adapter->wait_for_reset) { |
---|
1791 | | - release_resources(adapter); |
---|
1792 | | - release_sub_crqs(adapter, 1); |
---|
1793 | | - release_crq_queue(adapter); |
---|
| 1990 | + /* Release the RTNL lock before link state change and |
---|
| 1991 | + * re-acquire after the link state change to allow |
---|
| 1992 | + * linkwatch_event to grab the RTNL lock and run during |
---|
| 1993 | + * a reset. |
---|
| 1994 | + */ |
---|
| 1995 | + rtnl_unlock(); |
---|
| 1996 | + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); |
---|
| 1997 | + rtnl_lock(); |
---|
| 1998 | + if (rc) |
---|
| 1999 | + goto out; |
---|
| 2000 | + |
---|
| 2001 | + if (adapter->state != VNIC_CLOSING) { |
---|
| 2002 | + rc = -1; |
---|
| 2003 | + goto out; |
---|
| 2004 | + } |
---|
| 2005 | + |
---|
| 2006 | + adapter->state = VNIC_CLOSED; |
---|
1794 | 2007 | } |
---|
1795 | 2008 | |
---|
1796 | 2009 | if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { |
---|
.. | .. |
---|
1799 | 2012 | */ |
---|
1800 | 2013 | adapter->state = VNIC_PROBED; |
---|
1801 | 2014 | |
---|
1802 | | - if (adapter->wait_for_reset) { |
---|
1803 | | - rc = init_crq_queue(adapter); |
---|
1804 | | - } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { |
---|
| 2015 | + if (adapter->reset_reason == VNIC_RESET_MOBILITY) { |
---|
1805 | 2016 | rc = ibmvnic_reenable_crq_queue(adapter); |
---|
1806 | 2017 | release_sub_crqs(adapter, 1); |
---|
1807 | 2018 | } else { |
---|
1808 | 2019 | rc = ibmvnic_reset_crq(adapter); |
---|
1809 | | - if (!rc) |
---|
| 2020 | + if (rc == H_CLOSED || rc == H_SUCCESS) { |
---|
1810 | 2021 | rc = vio_enable_interrupts(adapter->vdev); |
---|
| 2022 | + if (rc) |
---|
| 2023 | + netdev_err(adapter->netdev, |
---|
| 2024 | + "Reset failed to enable interrupts. rc=%d\n", |
---|
| 2025 | + rc); |
---|
| 2026 | + } |
---|
1811 | 2027 | } |
---|
1812 | 2028 | |
---|
1813 | 2029 | if (rc) { |
---|
1814 | 2030 | netdev_err(adapter->netdev, |
---|
1815 | | - "Couldn't initialize crq. rc=%d\n", rc); |
---|
1816 | | - return rc; |
---|
| 2031 | + "Reset couldn't initialize crq. rc=%d\n", rc); |
---|
| 2032 | + goto out; |
---|
1817 | 2033 | } |
---|
1818 | 2034 | |
---|
1819 | | - rc = ibmvnic_reset_init(adapter); |
---|
1820 | | - if (rc) |
---|
1821 | | - return IBMVNIC_INIT_FAILED; |
---|
| 2035 | + rc = ibmvnic_reset_init(adapter, true); |
---|
| 2036 | + if (rc) { |
---|
| 2037 | + rc = IBMVNIC_INIT_FAILED; |
---|
| 2038 | + goto out; |
---|
| 2039 | + } |
---|
1822 | 2040 | |
---|
1823 | 2041 | /* If the adapter was in PROBE state prior to the reset, |
---|
1824 | 2042 | * exit here. |
---|
1825 | 2043 | */ |
---|
1826 | | - if (reset_state == VNIC_PROBED) |
---|
1827 | | - return 0; |
---|
| 2044 | + if (reset_state == VNIC_PROBED) { |
---|
| 2045 | + rc = 0; |
---|
| 2046 | + goto out; |
---|
| 2047 | + } |
---|
1828 | 2048 | |
---|
1829 | 2049 | rc = ibmvnic_login(netdev); |
---|
1830 | 2050 | if (rc) { |
---|
1831 | | - adapter->state = reset_state; |
---|
1832 | | - return rc; |
---|
| 2051 | + goto out; |
---|
1833 | 2052 | } |
---|
1834 | 2053 | |
---|
1835 | | - if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || |
---|
1836 | | - adapter->wait_for_reset) { |
---|
1837 | | - rc = init_resources(adapter); |
---|
1838 | | - if (rc) |
---|
1839 | | - return rc; |
---|
1840 | | - } else if (adapter->req_rx_queues != old_num_rx_queues || |
---|
1841 | | - adapter->req_tx_queues != old_num_tx_queues || |
---|
1842 | | - adapter->req_rx_add_entries_per_subcrq != |
---|
1843 | | - old_num_rx_slots || |
---|
1844 | | - adapter->req_tx_entries_per_subcrq != |
---|
1845 | | - old_num_tx_slots) { |
---|
| 2054 | + if (adapter->req_rx_queues != old_num_rx_queues || |
---|
| 2055 | + adapter->req_tx_queues != old_num_tx_queues || |
---|
| 2056 | + adapter->req_rx_add_entries_per_subcrq != |
---|
| 2057 | + old_num_rx_slots || |
---|
| 2058 | + adapter->req_tx_entries_per_subcrq != |
---|
| 2059 | + old_num_tx_slots || |
---|
| 2060 | + !adapter->rx_pool || |
---|
| 2061 | + !adapter->tso_pool || |
---|
| 2062 | + !adapter->tx_pool) { |
---|
1846 | 2063 | release_rx_pools(adapter); |
---|
1847 | 2064 | release_tx_pools(adapter); |
---|
1848 | 2065 | release_napi(adapter); |
---|
.. | .. |
---|
1850 | 2067 | |
---|
1851 | 2068 | rc = init_resources(adapter); |
---|
1852 | 2069 | if (rc) |
---|
1853 | | - return rc; |
---|
| 2070 | + goto out; |
---|
1854 | 2071 | |
---|
1855 | 2072 | } else { |
---|
1856 | 2073 | rc = reset_tx_pools(adapter); |
---|
1857 | | - if (rc) |
---|
1858 | | - return rc; |
---|
| 2074 | + if (rc) { |
---|
| 2075 | + netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", |
---|
| 2076 | + rc); |
---|
| 2077 | + goto out; |
---|
| 2078 | + } |
---|
1859 | 2079 | |
---|
1860 | 2080 | rc = reset_rx_pools(adapter); |
---|
1861 | | - if (rc) |
---|
1862 | | - return rc; |
---|
| 2081 | + if (rc) { |
---|
| 2082 | + netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", |
---|
| 2083 | + rc); |
---|
| 2084 | + goto out; |
---|
| 2085 | + } |
---|
1863 | 2086 | } |
---|
1864 | 2087 | ibmvnic_disable_irqs(adapter); |
---|
1865 | 2088 | } |
---|
1866 | 2089 | adapter->state = VNIC_CLOSED; |
---|
1867 | 2090 | |
---|
1868 | | - if (reset_state == VNIC_CLOSED) |
---|
1869 | | - return 0; |
---|
| 2091 | + if (reset_state == VNIC_CLOSED) { |
---|
| 2092 | + rc = 0; |
---|
| 2093 | + goto out; |
---|
| 2094 | + } |
---|
1870 | 2095 | |
---|
1871 | 2096 | rc = __ibmvnic_open(netdev); |
---|
1872 | 2097 | if (rc) { |
---|
1873 | | - if (list_empty(&adapter->rwi_list)) |
---|
1874 | | - adapter->state = VNIC_CLOSED; |
---|
1875 | | - else |
---|
1876 | | - adapter->state = reset_state; |
---|
1877 | | - |
---|
1878 | | - return 0; |
---|
| 2098 | + rc = IBMVNIC_OPEN_FAILED; |
---|
| 2099 | + goto out; |
---|
1879 | 2100 | } |
---|
1880 | 2101 | |
---|
1881 | 2102 | /* refresh device's multicast list */ |
---|
1882 | 2103 | ibmvnic_set_multi(netdev); |
---|
1883 | 2104 | |
---|
1884 | | - if (adapter->reset_reason != VNIC_RESET_FAILOVER && |
---|
1885 | | - adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) { |
---|
| 2105 | + if (adapter->reset_reason == VNIC_RESET_FAILOVER || |
---|
| 2106 | + adapter->reset_reason == VNIC_RESET_MOBILITY) { |
---|
1886 | 2107 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); |
---|
1887 | 2108 | call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev); |
---|
1888 | 2109 | } |
---|
1889 | 2110 | |
---|
1890 | | - netif_carrier_on(netdev); |
---|
| 2111 | + rc = 0; |
---|
1891 | 2112 | |
---|
1892 | | - return 0; |
---|
| 2113 | +out: |
---|
| 2114 | + /* restore the adapter state if reset failed */ |
---|
| 2115 | + if (rc) |
---|
| 2116 | + adapter->state = reset_state; |
---|
| 2117 | + rtnl_unlock(); |
---|
| 2118 | + |
---|
| 2119 | + netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n", |
---|
| 2120 | + adapter->state, adapter->failover_pending, rc); |
---|
| 2121 | + return rc; |
---|
1893 | 2122 | } |
---|
1894 | 2123 | |
---|
1895 | 2124 | static int do_hard_reset(struct ibmvnic_adapter *adapter, |
---|
.. | .. |
---|
1919 | 2148 | if (rc) { |
---|
1920 | 2149 | netdev_err(adapter->netdev, |
---|
1921 | 2150 | "Couldn't initialize crq. rc=%d\n", rc); |
---|
1922 | | - return rc; |
---|
| 2151 | + goto out; |
---|
1923 | 2152 | } |
---|
1924 | 2153 | |
---|
1925 | | - rc = ibmvnic_init(adapter); |
---|
| 2154 | + rc = ibmvnic_reset_init(adapter, false); |
---|
1926 | 2155 | if (rc) |
---|
1927 | | - return rc; |
---|
| 2156 | + goto out; |
---|
1928 | 2157 | |
---|
1929 | 2158 | /* If the adapter was in PROBE state prior to the reset, |
---|
1930 | 2159 | * exit here. |
---|
1931 | 2160 | */ |
---|
1932 | 2161 | if (reset_state == VNIC_PROBED) |
---|
1933 | | - return 0; |
---|
| 2162 | + goto out; |
---|
1934 | 2163 | |
---|
1935 | 2164 | rc = ibmvnic_login(netdev); |
---|
1936 | | - if (rc) { |
---|
1937 | | - adapter->state = VNIC_PROBED; |
---|
1938 | | - return 0; |
---|
1939 | | - } |
---|
| 2165 | + if (rc) |
---|
| 2166 | + goto out; |
---|
1940 | 2167 | |
---|
1941 | 2168 | rc = init_resources(adapter); |
---|
1942 | 2169 | if (rc) |
---|
1943 | | - return rc; |
---|
| 2170 | + goto out; |
---|
1944 | 2171 | |
---|
1945 | 2172 | ibmvnic_disable_irqs(adapter); |
---|
1946 | 2173 | adapter->state = VNIC_CLOSED; |
---|
1947 | 2174 | |
---|
1948 | 2175 | if (reset_state == VNIC_CLOSED) |
---|
1949 | | - return 0; |
---|
| 2176 | + goto out; |
---|
1950 | 2177 | |
---|
1951 | 2178 | rc = __ibmvnic_open(netdev); |
---|
1952 | 2179 | if (rc) { |
---|
1953 | | - if (list_empty(&adapter->rwi_list)) |
---|
1954 | | - adapter->state = VNIC_CLOSED; |
---|
1955 | | - else |
---|
1956 | | - adapter->state = reset_state; |
---|
1957 | | - |
---|
1958 | | - return 0; |
---|
| 2180 | + rc = IBMVNIC_OPEN_FAILED; |
---|
| 2181 | + goto out; |
---|
1959 | 2182 | } |
---|
1960 | 2183 | |
---|
1961 | | - netif_carrier_on(netdev); |
---|
1962 | | - |
---|
1963 | | - return 0; |
---|
| 2184 | + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); |
---|
| 2185 | + call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev); |
---|
| 2186 | +out: |
---|
| 2187 | + /* restore adapter state if reset failed */ |
---|
| 2188 | + if (rc) |
---|
| 2189 | + adapter->state = reset_state; |
---|
| 2190 | + netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n", |
---|
| 2191 | + adapter->state, adapter->failover_pending, rc); |
---|
| 2192 | + return rc; |
---|
1964 | 2193 | } |
---|
1965 | 2194 | |
---|
1966 | 2195 | static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) |
---|
.. | .. |
---|
1982 | 2211 | return rwi; |
---|
1983 | 2212 | } |
---|
1984 | 2213 | |
---|
1985 | | -static void free_all_rwi(struct ibmvnic_adapter *adapter) |
---|
1986 | | -{ |
---|
1987 | | - struct ibmvnic_rwi *rwi; |
---|
1988 | | - |
---|
1989 | | - rwi = get_next_rwi(adapter); |
---|
1990 | | - while (rwi) { |
---|
1991 | | - kfree(rwi); |
---|
1992 | | - rwi = get_next_rwi(adapter); |
---|
1993 | | - } |
---|
1994 | | -} |
---|
1995 | | - |
---|
1996 | 2214 | static void __ibmvnic_reset(struct work_struct *work) |
---|
1997 | 2215 | { |
---|
1998 | 2216 | struct ibmvnic_rwi *rwi; |
---|
1999 | 2217 | struct ibmvnic_adapter *adapter; |
---|
2000 | | - struct net_device *netdev; |
---|
2001 | | - bool we_lock_rtnl = false; |
---|
| 2218 | + bool saved_state = false; |
---|
| 2219 | + unsigned long flags; |
---|
2002 | 2220 | u32 reset_state; |
---|
2003 | 2221 | int rc = 0; |
---|
2004 | 2222 | |
---|
2005 | 2223 | adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); |
---|
2006 | | - netdev = adapter->netdev; |
---|
2007 | 2224 | |
---|
2008 | | - /* netif_set_real_num_xx_queues needs to take rtnl lock here |
---|
2009 | | - * unless wait_for_reset is set, in which case the rtnl lock |
---|
2010 | | - * has already been taken before initializing the reset |
---|
2011 | | - */ |
---|
2012 | | - if (!adapter->wait_for_reset) { |
---|
2013 | | - rtnl_lock(); |
---|
2014 | | - we_lock_rtnl = true; |
---|
| 2225 | + if (test_and_set_bit_lock(0, &adapter->resetting)) { |
---|
| 2226 | + schedule_delayed_work(&adapter->ibmvnic_delayed_reset, |
---|
| 2227 | + IBMVNIC_RESET_DELAY); |
---|
| 2228 | + return; |
---|
2015 | 2229 | } |
---|
2016 | | - reset_state = adapter->state; |
---|
2017 | 2230 | |
---|
2018 | 2231 | rwi = get_next_rwi(adapter); |
---|
2019 | 2232 | while (rwi) { |
---|
| 2233 | + spin_lock_irqsave(&adapter->state_lock, flags); |
---|
| 2234 | + |
---|
2020 | 2235 | if (adapter->state == VNIC_REMOVING || |
---|
2021 | 2236 | adapter->state == VNIC_REMOVED) { |
---|
| 2237 | + spin_unlock_irqrestore(&adapter->state_lock, flags); |
---|
2022 | 2238 | kfree(rwi); |
---|
2023 | 2239 | rc = EBUSY; |
---|
2024 | 2240 | break; |
---|
2025 | 2241 | } |
---|
2026 | 2242 | |
---|
2027 | | - if (adapter->force_reset_recovery) { |
---|
2028 | | - adapter->force_reset_recovery = false; |
---|
2029 | | - rc = do_hard_reset(adapter, rwi, reset_state); |
---|
| 2243 | + if (!saved_state) { |
---|
| 2244 | + reset_state = adapter->state; |
---|
| 2245 | + saved_state = true; |
---|
| 2246 | + } |
---|
| 2247 | + spin_unlock_irqrestore(&adapter->state_lock, flags); |
---|
| 2248 | + |
---|
| 2249 | + if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) { |
---|
| 2250 | + /* CHANGE_PARAM requestor holds rtnl_lock */ |
---|
| 2251 | + rc = do_change_param_reset(adapter, rwi, reset_state); |
---|
| 2252 | + } else if (adapter->force_reset_recovery) { |
---|
| 2253 | + /* |
---|
| 2254 | + * Since we are doing a hard reset now, clear the |
---|
| 2255 | + * failover_pending flag so we don't ignore any |
---|
| 2256 | + * future MOBILITY or other resets. |
---|
| 2257 | + */ |
---|
| 2258 | + adapter->failover_pending = false; |
---|
| 2259 | + |
---|
| 2260 | + /* Transport event occurred during previous reset */ |
---|
| 2261 | + if (adapter->wait_for_reset) { |
---|
| 2262 | + /* Previous was CHANGE_PARAM; caller locked */ |
---|
| 2263 | + adapter->force_reset_recovery = false; |
---|
| 2264 | + rc = do_hard_reset(adapter, rwi, reset_state); |
---|
| 2265 | + } else { |
---|
| 2266 | + rtnl_lock(); |
---|
| 2267 | + adapter->force_reset_recovery = false; |
---|
| 2268 | + rc = do_hard_reset(adapter, rwi, reset_state); |
---|
| 2269 | + rtnl_unlock(); |
---|
| 2270 | + } |
---|
| 2271 | + if (rc) { |
---|
| 2272 | + /* give backing device time to settle down */ |
---|
| 2273 | + netdev_dbg(adapter->netdev, |
---|
| 2274 | + "[S:%d] Hard reset failed, waiting 60 secs\n", |
---|
| 2275 | + adapter->state); |
---|
| 2276 | + set_current_state(TASK_UNINTERRUPTIBLE); |
---|
| 2277 | + schedule_timeout(60 * HZ); |
---|
| 2278 | + } |
---|
2030 | 2279 | } else { |
---|
2031 | 2280 | rc = do_reset(adapter, rwi, reset_state); |
---|
2032 | 2281 | } |
---|
2033 | 2282 | kfree(rwi); |
---|
2034 | | - if (rc && rc != IBMVNIC_INIT_FAILED && |
---|
2035 | | - !adapter->force_reset_recovery) |
---|
2036 | | - break; |
---|
| 2283 | + adapter->last_reset_time = jiffies; |
---|
| 2284 | + |
---|
| 2285 | + if (rc) |
---|
| 2286 | + netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); |
---|
2037 | 2287 | |
---|
2038 | 2288 | rwi = get_next_rwi(adapter); |
---|
| 2289 | + |
---|
| 2290 | + if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || |
---|
| 2291 | + rwi->reset_reason == VNIC_RESET_MOBILITY)) |
---|
| 2292 | + adapter->force_reset_recovery = true; |
---|
2039 | 2293 | } |
---|
2040 | 2294 | |
---|
2041 | 2295 | if (adapter->wait_for_reset) { |
---|
2042 | | - adapter->wait_for_reset = false; |
---|
2043 | 2296 | adapter->reset_done_rc = rc; |
---|
2044 | 2297 | complete(&adapter->reset_done); |
---|
2045 | 2298 | } |
---|
2046 | 2299 | |
---|
2047 | | - if (rc) { |
---|
2048 | | - netdev_dbg(adapter->netdev, "Reset failed\n"); |
---|
2049 | | - free_all_rwi(adapter); |
---|
2050 | | - } |
---|
| 2300 | + clear_bit_unlock(0, &adapter->resetting); |
---|
2051 | 2301 | |
---|
2052 | | - adapter->resetting = false; |
---|
2053 | | - if (we_lock_rtnl) |
---|
2054 | | - rtnl_unlock(); |
---|
| 2302 | + netdev_dbg(adapter->netdev, |
---|
| 2303 | + "[S:%d FRR:%d WFR:%d] Done processing resets\n", |
---|
| 2304 | + adapter->state, adapter->force_reset_recovery, |
---|
| 2305 | + adapter->wait_for_reset); |
---|
| 2306 | +} |
---|
| 2307 | + |
---|
| 2308 | +static void __ibmvnic_delayed_reset(struct work_struct *work) |
---|
| 2309 | +{ |
---|
| 2310 | + struct ibmvnic_adapter *adapter; |
---|
| 2311 | + |
---|
| 2312 | + adapter = container_of(work, struct ibmvnic_adapter, |
---|
| 2313 | + ibmvnic_delayed_reset.work); |
---|
| 2314 | + __ibmvnic_reset(&adapter->ibmvnic_reset); |
---|
2055 | 2315 | } |
---|
2056 | 2316 | |
---|
2057 | 2317 | static int ibmvnic_reset(struct ibmvnic_adapter *adapter, |
---|
.. | .. |
---|
2063 | 2323 | unsigned long flags; |
---|
2064 | 2324 | int ret; |
---|
2065 | 2325 | |
---|
| 2326 | + spin_lock_irqsave(&adapter->rwi_lock, flags); |
---|
| 2327 | + |
---|
| 2328 | + /* |
---|
| 2329 | + * If failover is pending don't schedule any other reset. |
---|
| 2330 | + * Instead let the failover complete. If there is already a |
---|
| 2331 | + * a failover reset scheduled, we will detect and drop the |
---|
| 2332 | + * duplicate reset when walking the ->rwi_list below. |
---|
| 2333 | + */ |
---|
2066 | 2334 | if (adapter->state == VNIC_REMOVING || |
---|
2067 | 2335 | adapter->state == VNIC_REMOVED || |
---|
2068 | | - adapter->failover_pending) { |
---|
| 2336 | + (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { |
---|
2069 | 2337 | ret = EBUSY; |
---|
2070 | 2338 | netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); |
---|
2071 | 2339 | goto err; |
---|
.. | .. |
---|
2073 | 2341 | |
---|
2074 | 2342 | if (adapter->state == VNIC_PROBING) { |
---|
2075 | 2343 | netdev_warn(netdev, "Adapter reset during probe\n"); |
---|
2076 | | - ret = adapter->init_done_rc = EAGAIN; |
---|
| 2344 | + adapter->init_done_rc = EAGAIN; |
---|
| 2345 | + ret = EAGAIN; |
---|
2077 | 2346 | goto err; |
---|
2078 | 2347 | } |
---|
2079 | | - |
---|
2080 | | - spin_lock_irqsave(&adapter->rwi_lock, flags); |
---|
2081 | 2348 | |
---|
2082 | 2349 | list_for_each(entry, &adapter->rwi_list) { |
---|
2083 | 2350 | tmp = list_entry(entry, struct ibmvnic_rwi, list); |
---|
2084 | 2351 | if (tmp->reset_reason == reason) { |
---|
2085 | | - netdev_dbg(netdev, "Skipping matching reset\n"); |
---|
2086 | | - spin_unlock_irqrestore(&adapter->rwi_lock, flags); |
---|
| 2352 | + netdev_dbg(netdev, "Skipping matching reset, reason=%d\n", |
---|
| 2353 | + reason); |
---|
2087 | 2354 | ret = EBUSY; |
---|
2088 | 2355 | goto err; |
---|
2089 | 2356 | } |
---|
.. | .. |
---|
2091 | 2358 | |
---|
2092 | 2359 | rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC); |
---|
2093 | 2360 | if (!rwi) { |
---|
2094 | | - spin_unlock_irqrestore(&adapter->rwi_lock, flags); |
---|
2095 | | - ibmvnic_close(netdev); |
---|
2096 | 2361 | ret = ENOMEM; |
---|
2097 | 2362 | goto err; |
---|
2098 | 2363 | } |
---|
.. | .. |
---|
2100 | 2365 | * flush reset queue and process this reset |
---|
2101 | 2366 | */ |
---|
2102 | 2367 | if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { |
---|
2103 | | - list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) |
---|
| 2368 | + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { |
---|
2104 | 2369 | list_del(entry); |
---|
| 2370 | + kfree(list_entry(entry, struct ibmvnic_rwi, list)); |
---|
| 2371 | + } |
---|
2105 | 2372 | } |
---|
2106 | 2373 | rwi->reset_reason = reason; |
---|
2107 | 2374 | list_add_tail(&rwi->list, &adapter->rwi_list); |
---|
2108 | | - spin_unlock_irqrestore(&adapter->rwi_lock, flags); |
---|
2109 | | - adapter->resetting = true; |
---|
2110 | 2375 | netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); |
---|
2111 | 2376 | schedule_work(&adapter->ibmvnic_reset); |
---|
2112 | 2377 | |
---|
2113 | | - return 0; |
---|
| 2378 | + ret = 0; |
---|
2114 | 2379 | err: |
---|
2115 | | - if (adapter->wait_for_reset) |
---|
2116 | | - adapter->wait_for_reset = false; |
---|
| 2380 | + /* ibmvnic_close() below can block, so drop the lock first */ |
---|
| 2381 | + spin_unlock_irqrestore(&adapter->rwi_lock, flags); |
---|
| 2382 | + |
---|
| 2383 | + if (ret == ENOMEM) |
---|
| 2384 | + ibmvnic_close(netdev); |
---|
| 2385 | + |
---|
2117 | 2386 | return -ret; |
---|
2118 | 2387 | } |
---|
2119 | 2388 | |
---|
2120 | | -static void ibmvnic_tx_timeout(struct net_device *dev) |
---|
| 2389 | +static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) |
---|
2121 | 2390 | { |
---|
2122 | 2391 | struct ibmvnic_adapter *adapter = netdev_priv(dev); |
---|
2123 | 2392 | |
---|
| 2393 | + if (test_bit(0, &adapter->resetting)) { |
---|
| 2394 | + netdev_err(adapter->netdev, |
---|
| 2395 | + "Adapter is resetting, skip timeout reset\n"); |
---|
| 2396 | + return; |
---|
| 2397 | + } |
---|
| 2398 | + /* No queuing up reset until at least 5 seconds (default watchdog val) |
---|
| 2399 | + * after last reset |
---|
| 2400 | + */ |
---|
| 2401 | + if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { |
---|
| 2402 | + netdev_dbg(dev, "Not yet time to tx timeout.\n"); |
---|
| 2403 | + return; |
---|
| 2404 | + } |
---|
2124 | 2405 | ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); |
---|
2125 | 2406 | } |
---|
2126 | 2407 | |
---|
.. | .. |
---|
2153 | 2434 | u16 offset; |
---|
2154 | 2435 | u8 flags = 0; |
---|
2155 | 2436 | |
---|
2156 | | - if (unlikely(adapter->resetting && |
---|
| 2437 | + if (unlikely(test_bit(0, &adapter->resetting) && |
---|
2157 | 2438 | adapter->reset_reason != VNIC_RESET_NON_FATAL)) { |
---|
2158 | 2439 | enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); |
---|
2159 | 2440 | napi_complete_done(napi, frames_processed); |
---|
.. | .. |
---|
2252 | 2533 | adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; |
---|
2253 | 2534 | adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; |
---|
2254 | 2535 | |
---|
2255 | | - init_completion(&adapter->reset_done); |
---|
| 2536 | + reinit_completion(&adapter->reset_done); |
---|
2256 | 2537 | adapter->wait_for_reset = true; |
---|
2257 | 2538 | rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); |
---|
2258 | | - if (rc) |
---|
2259 | | - return rc; |
---|
2260 | | - wait_for_completion(&adapter->reset_done); |
---|
| 2539 | + |
---|
| 2540 | + if (rc) { |
---|
| 2541 | + ret = rc; |
---|
| 2542 | + goto out; |
---|
| 2543 | + } |
---|
| 2544 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); |
---|
| 2545 | + if (rc) { |
---|
| 2546 | + ret = -ENODEV; |
---|
| 2547 | + goto out; |
---|
| 2548 | + } |
---|
2261 | 2549 | |
---|
2262 | 2550 | ret = 0; |
---|
2263 | 2551 | if (adapter->reset_done_rc) { |
---|
.. | .. |
---|
2268 | 2556 | adapter->desired.rx_entries = adapter->fallback.rx_entries; |
---|
2269 | 2557 | adapter->desired.tx_entries = adapter->fallback.tx_entries; |
---|
2270 | 2558 | |
---|
2271 | | - init_completion(&adapter->reset_done); |
---|
| 2559 | + reinit_completion(&adapter->reset_done); |
---|
2272 | 2560 | adapter->wait_for_reset = true; |
---|
2273 | 2561 | rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); |
---|
2274 | | - if (rc) |
---|
2275 | | - return ret; |
---|
2276 | | - wait_for_completion(&adapter->reset_done); |
---|
| 2562 | + if (rc) { |
---|
| 2563 | + ret = rc; |
---|
| 2564 | + goto out; |
---|
| 2565 | + } |
---|
| 2566 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, |
---|
| 2567 | + 60000); |
---|
| 2568 | + if (rc) { |
---|
| 2569 | + ret = -ENODEV; |
---|
| 2570 | + goto out; |
---|
| 2571 | + } |
---|
2277 | 2572 | } |
---|
| 2573 | +out: |
---|
2278 | 2574 | adapter->wait_for_reset = false; |
---|
2279 | 2575 | |
---|
2280 | 2576 | return ret; |
---|
.. | .. |
---|
2323 | 2619 | static int ibmvnic_get_link_ksettings(struct net_device *netdev, |
---|
2324 | 2620 | struct ethtool_link_ksettings *cmd) |
---|
2325 | 2621 | { |
---|
2326 | | - u32 supported, advertising; |
---|
| 2622 | + struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
---|
| 2623 | + int rc; |
---|
2327 | 2624 | |
---|
2328 | | - supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | |
---|
2329 | | - SUPPORTED_FIBRE); |
---|
2330 | | - advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | |
---|
2331 | | - ADVERTISED_FIBRE); |
---|
2332 | | - cmd->base.speed = SPEED_1000; |
---|
2333 | | - cmd->base.duplex = DUPLEX_FULL; |
---|
| 2625 | + rc = send_query_phys_parms(adapter); |
---|
| 2626 | + if (rc) { |
---|
| 2627 | + adapter->speed = SPEED_UNKNOWN; |
---|
| 2628 | + adapter->duplex = DUPLEX_UNKNOWN; |
---|
| 2629 | + } |
---|
| 2630 | + cmd->base.speed = adapter->speed; |
---|
| 2631 | + cmd->base.duplex = adapter->duplex; |
---|
2334 | 2632 | cmd->base.port = PORT_FIBRE; |
---|
2335 | 2633 | cmd->base.phy_address = 0; |
---|
2336 | 2634 | cmd->base.autoneg = AUTONEG_ENABLE; |
---|
2337 | | - |
---|
2338 | | - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, |
---|
2339 | | - supported); |
---|
2340 | | - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, |
---|
2341 | | - advertising); |
---|
2342 | 2635 | |
---|
2343 | 2636 | return 0; |
---|
2344 | 2637 | } |
---|
.. | .. |
---|
2504 | 2797 | cpu_to_be32(sizeof(struct ibmvnic_statistics)); |
---|
2505 | 2798 | |
---|
2506 | 2799 | /* Wait for data to be written */ |
---|
2507 | | - init_completion(&adapter->stats_done); |
---|
| 2800 | + reinit_completion(&adapter->stats_done); |
---|
2508 | 2801 | rc = ibmvnic_send_crq(adapter, &crq); |
---|
2509 | 2802 | if (rc) |
---|
2510 | 2803 | return; |
---|
2511 | | - wait_for_completion(&adapter->stats_done); |
---|
| 2804 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); |
---|
| 2805 | + if (rc) |
---|
| 2806 | + return; |
---|
2512 | 2807 | |
---|
2513 | 2808 | for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) |
---|
2514 | | - data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, |
---|
2515 | | - ibmvnic_stats[i].offset)); |
---|
| 2809 | + data[i] = be64_to_cpu(IBMVNIC_GET_STAT |
---|
| 2810 | + (adapter, ibmvnic_stats[i].offset)); |
---|
2516 | 2811 | |
---|
2517 | 2812 | for (j = 0; j < adapter->req_tx_queues; j++) { |
---|
2518 | 2813 | data[i] = adapter->tx_stats_buffers[j].packets; |
---|
.. | .. |
---|
2555 | 2850 | { |
---|
2556 | 2851 | int rc; |
---|
2557 | 2852 | |
---|
| 2853 | + if (!scrq) { |
---|
| 2854 | + netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); |
---|
| 2855 | + return -EINVAL; |
---|
| 2856 | + } |
---|
| 2857 | + |
---|
2558 | 2858 | if (scrq->irq) { |
---|
2559 | 2859 | free_irq(scrq->irq, scrq); |
---|
2560 | 2860 | irq_dispose_mapping(scrq->irq); |
---|
2561 | 2861 | scrq->irq = 0; |
---|
2562 | 2862 | } |
---|
2563 | | - |
---|
2564 | | - memset(scrq->msgs, 0, 4 * PAGE_SIZE); |
---|
2565 | | - atomic_set(&scrq->used, 0); |
---|
2566 | | - scrq->cur = 0; |
---|
| 2863 | + if (scrq->msgs) { |
---|
| 2864 | + memset(scrq->msgs, 0, 4 * PAGE_SIZE); |
---|
| 2865 | + atomic_set(&scrq->used, 0); |
---|
| 2866 | + scrq->cur = 0; |
---|
| 2867 | + } else { |
---|
| 2868 | + netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); |
---|
| 2869 | + return -EINVAL; |
---|
| 2870 | + } |
---|
2567 | 2871 | |
---|
2568 | 2872 | rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, |
---|
2569 | 2873 | 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); |
---|
.. | .. |
---|
2759 | 3063 | return 1; |
---|
2760 | 3064 | } |
---|
2761 | 3065 | |
---|
2762 | | - if (adapter->resetting && |
---|
| 3066 | + if (test_bit(0, &adapter->resetting) && |
---|
2763 | 3067 | adapter->reset_reason == VNIC_RESET_MOBILITY) { |
---|
2764 | 3068 | u64 val = (0xff000000) | scrq->hw_irq; |
---|
2765 | 3069 | |
---|
2766 | 3070 | rc = plpar_hcall_norets(H_EOI, val); |
---|
2767 | | - if (rc) |
---|
| 3071 | + /* H_EOI would fail with rc = H_FUNCTION when running |
---|
| 3072 | + * in XIVE mode which is expected, but not an error. |
---|
| 3073 | + */ |
---|
| 3074 | + if (rc && rc != H_FUNCTION) |
---|
2768 | 3075 | dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", |
---|
2769 | 3076 | val, rc); |
---|
2770 | 3077 | } |
---|
.. | .. |
---|
2907 | 3214 | goto req_tx_irq_failed; |
---|
2908 | 3215 | } |
---|
2909 | 3216 | |
---|
| 3217 | + snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", |
---|
| 3218 | + adapter->vdev->unit_address, i); |
---|
2910 | 3219 | rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, |
---|
2911 | | - 0, "ibmvnic_tx", scrq); |
---|
| 3220 | + 0, scrq->name, scrq); |
---|
2912 | 3221 | |
---|
2913 | 3222 | if (rc) { |
---|
2914 | 3223 | dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n", |
---|
.. | .. |
---|
2928 | 3237 | dev_err(dev, "Error mapping irq\n"); |
---|
2929 | 3238 | goto req_rx_irq_failed; |
---|
2930 | 3239 | } |
---|
| 3240 | + snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", |
---|
| 3241 | + adapter->vdev->unit_address, i); |
---|
2931 | 3242 | rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, |
---|
2932 | | - 0, "ibmvnic_rx", scrq); |
---|
| 3243 | + 0, scrq->name, scrq); |
---|
2933 | 3244 | if (rc) { |
---|
2934 | 3245 | dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n", |
---|
2935 | 3246 | scrq->irq, rc); |
---|
.. | .. |
---|
3039 | 3350 | return -1; |
---|
3040 | 3351 | } |
---|
3041 | 3352 | |
---|
3042 | | -static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry) |
---|
| 3353 | +static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) |
---|
3043 | 3354 | { |
---|
3044 | 3355 | struct device *dev = &adapter->vdev->dev; |
---|
3045 | 3356 | union ibmvnic_crq crq; |
---|
.. | .. |
---|
3335 | 3646 | if (rc) { |
---|
3336 | 3647 | if (rc == H_CLOSED) { |
---|
3337 | 3648 | dev_warn(dev, "CRQ Queue closed\n"); |
---|
3338 | | - if (adapter->resetting) |
---|
3339 | | - ibmvnic_reset(adapter, VNIC_RESET_FATAL); |
---|
| 3649 | + /* do not reset, report the fail, wait for passive init from server */ |
---|
3340 | 3650 | } |
---|
3341 | 3651 | |
---|
3342 | 3652 | dev_warn(dev, "Send error (rc=%d)\n", rc); |
---|
.. | .. |
---|
3347 | 3657 | |
---|
/* Send the CRQ INIT command that starts the handshake with the VNIC
 * server.  The hypervisor returns H_CLOSED while the partner's CRQ is
 * not yet open (e.g. during a failover), so retry for up to ~5 seconds
 * (100 * 50ms) before giving up.
 *
 * Return: 0 on success, or the last error from ibmvnic_send_crq()
 * (possibly H_CLOSED if the partner never opened its queue).
 */
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int retries = 100;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	do {
		rc = ibmvnic_send_crq(adapter, &crq);
		/* Any result other than "partner queue closed" is final:
		 * either success (0) or a hard error worth reporting.
		 */
		if (rc != H_CLOSED)
			break;
		retries--;
		msleep(50);

	} while (retries > 0);

	if (rc) {
		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
		return rc;
	}

	return 0;
}
---|
3359 | 3686 | |
---|
3360 | 3687 | static int send_version_xchg(struct ibmvnic_adapter *adapter) |
---|
.. | .. |
---|
3423 | 3750 | struct ibmvnic_login_rsp_buffer *login_rsp_buffer; |
---|
3424 | 3751 | struct ibmvnic_login_buffer *login_buffer; |
---|
3425 | 3752 | struct device *dev = &adapter->vdev->dev; |
---|
| 3753 | + struct vnic_login_client_data *vlcd; |
---|
3426 | 3754 | dma_addr_t rsp_buffer_token; |
---|
3427 | 3755 | dma_addr_t buffer_token; |
---|
3428 | 3756 | size_t rsp_buffer_size; |
---|
3429 | 3757 | union ibmvnic_crq crq; |
---|
| 3758 | + int client_data_len; |
---|
3430 | 3759 | size_t buffer_size; |
---|
3431 | 3760 | __be64 *tx_list_p; |
---|
3432 | 3761 | __be64 *rx_list_p; |
---|
3433 | | - int client_data_len; |
---|
3434 | | - struct vnic_login_client_data *vlcd; |
---|
| 3762 | + int rc; |
---|
3435 | 3763 | int i; |
---|
3436 | 3764 | |
---|
3437 | 3765 | if (!adapter->tx_scrq || !adapter->rx_scrq) { |
---|
.. | .. |
---|
3440 | 3768 | return -1; |
---|
3441 | 3769 | } |
---|
3442 | 3770 | |
---|
| 3771 | + release_login_buffer(adapter); |
---|
3443 | 3772 | release_login_rsp_buffer(adapter); |
---|
| 3773 | + |
---|
3444 | 3774 | client_data_len = vnic_client_data_len(adapter); |
---|
3445 | 3775 | |
---|
3446 | 3776 | buffer_size = |
---|
.. | .. |
---|
3535 | 3865 | crq.login.cmd = LOGIN; |
---|
3536 | 3866 | crq.login.ioba = cpu_to_be32(buffer_token); |
---|
3537 | 3867 | crq.login.len = cpu_to_be32(buffer_size); |
---|
3538 | | - ibmvnic_send_crq(adapter, &crq); |
---|
| 3868 | + |
---|
| 3869 | + adapter->login_pending = true; |
---|
| 3870 | + rc = ibmvnic_send_crq(adapter, &crq); |
---|
| 3871 | + if (rc) { |
---|
| 3872 | + adapter->login_pending = false; |
---|
| 3873 | + netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); |
---|
| 3874 | + goto buf_send_failed; |
---|
| 3875 | + } |
---|
3539 | 3876 | |
---|
3540 | 3877 | return 0; |
---|
3541 | 3878 | |
---|
| 3879 | +buf_send_failed: |
---|
| 3880 | + dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, |
---|
| 3881 | + DMA_FROM_DEVICE); |
---|
3542 | 3882 | buf_rsp_map_failed: |
---|
3543 | 3883 | kfree(login_rsp_buffer); |
---|
| 3884 | + adapter->login_rsp_buf = NULL; |
---|
3544 | 3885 | buf_rsp_alloc_failed: |
---|
3545 | 3886 | dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); |
---|
3546 | 3887 | buf_map_failed: |
---|
3547 | 3888 | kfree(login_buffer); |
---|
| 3889 | + adapter->login_buf = NULL; |
---|
3548 | 3890 | buf_alloc_failed: |
---|
3549 | 3891 | return -1; |
---|
3550 | 3892 | } |
---|
.. | .. |
---|
3574 | 3916 | return ibmvnic_send_crq(adapter, &crq); |
---|
3575 | 3917 | } |
---|
3576 | 3918 | |
---|
3577 | | -static void send_map_query(struct ibmvnic_adapter *adapter) |
---|
| 3919 | +static void send_query_map(struct ibmvnic_adapter *adapter) |
---|
3578 | 3920 | { |
---|
3579 | 3921 | union ibmvnic_crq crq; |
---|
3580 | 3922 | |
---|
.. | .. |
---|
3585 | 3927 | } |
---|
3586 | 3928 | |
---|
3587 | 3929 | /* Send a series of CRQs requesting various capabilities of the VNIC server */ |
---|
3588 | | -static void send_cap_queries(struct ibmvnic_adapter *adapter) |
---|
| 3930 | +static void send_query_cap(struct ibmvnic_adapter *adapter) |
---|
3589 | 3931 | { |
---|
3590 | 3932 | union ibmvnic_crq crq; |
---|
3591 | 3933 | int cap_reqs; |
---|
.. | .. |
---|
3716 | 4058 | WARN_ON(cap_reqs != 0); |
---|
3717 | 4059 | } |
---|
3718 | 4060 | |
---|
/* Ask the VNIC server which IP offload capabilities (checksum, TSO,
 * large receive) it supports.  The response is DMA'd by the server into
 * adapter->ip_offload_buf, which is mapped here and handled later in
 * handle_query_ip_offload_rsp().
 */
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
{
	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	/* Map the response buffer for the device to write into */
	adapter->ip_offload_tok =
		dma_map_single(dev,
			       &adapter->ip_offload_buf,
			       buf_sz,
			       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
		/* Under Cooperative Memory Overcommit a mapping failure is
		 * an expected transient condition, so don't log it then.
		 */
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map offload buffer\n");
		return;
	}

	memset(&crq, 0, sizeof(crq));
	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
	crq.query_ip_offload.ioba =
	    cpu_to_be32(adapter->ip_offload_tok);

	ibmvnic_send_crq(adapter, &crq);
}
---|
| 4088 | + |
---|
/* Program the VNIC server's IP offload settings based on the
 * capabilities it reported in the QUERY_IP_OFFLOAD response
 * (adapter->ip_offload_buf), and update the netdev hw_features/features
 * flags to match.  Called from handle_query_ip_offload_rsp().
 */
static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	struct device *dev = &adapter->vdev->dev;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;

	/* Map the control buffer for the device to read */
	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev,
			       ctrl_buf,
			       sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
	/* Request exactly the offloads the server said it supports */
	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	ctrl_buf->large_rx_ipv4 = 0;
	ctrl_buf->large_rx_ipv6 = 0;

	/* Outside of initial probe this may run after a reset, where the
	 * server's capabilities can have changed; remember the old flags
	 * so features can be reconciled below.
	 */
	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
		/* NOTE(review): the zeroing above is immediately
		 * overwritten by the assignment below — looks redundant;
		 * kept as-is.
		 */
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
		    adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	/* Hand the populated control buffer to the server */
	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
---|
| 4167 | + |
---|
3719 | 4168 | static void handle_vpd_size_rsp(union ibmvnic_crq *crq, |
---|
3720 | 4169 | struct ibmvnic_adapter *adapter) |
---|
3721 | 4170 | { |
---|
.. | .. |
---|
3785 | 4234 | { |
---|
3786 | 4235 | struct device *dev = &adapter->vdev->dev; |
---|
3787 | 4236 | struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; |
---|
3788 | | - union ibmvnic_crq crq; |
---|
3789 | 4237 | int i; |
---|
3790 | 4238 | |
---|
3791 | 4239 | dma_unmap_single(dev, adapter->ip_offload_tok, |
---|
.. | .. |
---|
3835 | 4283 | netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", |
---|
3836 | 4284 | buf->off_ipv6_ext_headers); |
---|
3837 | 4285 | |
---|
3838 | | - adapter->ip_offload_ctrl_tok = |
---|
3839 | | - dma_map_single(dev, &adapter->ip_offload_ctrl, |
---|
3840 | | - sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); |
---|
3841 | | - |
---|
3842 | | - if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { |
---|
3843 | | - dev_err(dev, "Couldn't map ip offload control buffer\n"); |
---|
3844 | | - return; |
---|
3845 | | - } |
---|
3846 | | - |
---|
3847 | | - adapter->ip_offload_ctrl.len = |
---|
3848 | | - cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); |
---|
3849 | | - adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); |
---|
3850 | | - adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum; |
---|
3851 | | - adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum; |
---|
3852 | | - adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; |
---|
3853 | | - adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; |
---|
3854 | | - adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; |
---|
3855 | | - adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum; |
---|
3856 | | - adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4; |
---|
3857 | | - adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6; |
---|
3858 | | - |
---|
3859 | | - /* large_rx disabled for now, additional features needed */ |
---|
3860 | | - adapter->ip_offload_ctrl.large_rx_ipv4 = 0; |
---|
3861 | | - adapter->ip_offload_ctrl.large_rx_ipv6 = 0; |
---|
3862 | | - |
---|
3863 | | - adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; |
---|
3864 | | - |
---|
3865 | | - if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) |
---|
3866 | | - adapter->netdev->features |= NETIF_F_IP_CSUM; |
---|
3867 | | - |
---|
3868 | | - if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) |
---|
3869 | | - adapter->netdev->features |= NETIF_F_IPV6_CSUM; |
---|
3870 | | - |
---|
3871 | | - if ((adapter->netdev->features & |
---|
3872 | | - (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) |
---|
3873 | | - adapter->netdev->features |= NETIF_F_RXCSUM; |
---|
3874 | | - |
---|
3875 | | - if (buf->large_tx_ipv4) |
---|
3876 | | - adapter->netdev->features |= NETIF_F_TSO; |
---|
3877 | | - if (buf->large_tx_ipv6) |
---|
3878 | | - adapter->netdev->features |= NETIF_F_TSO6; |
---|
3879 | | - |
---|
3880 | | - adapter->netdev->hw_features |= adapter->netdev->features; |
---|
3881 | | - |
---|
3882 | | - memset(&crq, 0, sizeof(crq)); |
---|
3883 | | - crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; |
---|
3884 | | - crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; |
---|
3885 | | - crq.control_ip_offload.len = |
---|
3886 | | - cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); |
---|
3887 | | - crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); |
---|
3888 | | - ibmvnic_send_crq(adapter, &crq); |
---|
| 4286 | + send_control_ip_offload(adapter); |
---|
3889 | 4287 | } |
---|
3890 | 4288 | |
---|
3891 | 4289 | static const char *ibmvnic_fw_err_cause(u16 cause) |
---|
.. | .. |
---|
3942 | 4340 | dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); |
---|
3943 | 4341 | goto out; |
---|
3944 | 4342 | } |
---|
3945 | | - memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], |
---|
3946 | | - ETH_ALEN); |
---|
| 4343 | + /* crq->change_mac_addr.mac_addr is the requested one |
---|
| 4344 | + * crq->change_mac_addr_rsp.mac_addr is the returned valid one. |
---|
| 4345 | + */ |
---|
| 4346 | + ether_addr_copy(netdev->dev_addr, |
---|
| 4347 | + &crq->change_mac_addr_rsp.mac_addr[0]); |
---|
| 4348 | + ether_addr_copy(adapter->mac_addr, |
---|
| 4349 | + &crq->change_mac_addr_rsp.mac_addr[0]); |
---|
3947 | 4350 | out: |
---|
3948 | 4351 | complete(&adapter->fw_done); |
---|
3949 | 4352 | return rc; |
---|
.. | .. |
---|
4013 | 4416 | be64_to_cpu(crq->request_capability_rsp.number); |
---|
4014 | 4417 | } |
---|
4015 | 4418 | |
---|
4016 | | - ibmvnic_send_req_caps(adapter, 1); |
---|
| 4419 | + send_request_cap(adapter, 1); |
---|
4017 | 4420 | return; |
---|
4018 | 4421 | default: |
---|
4019 | 4422 | dev_err(dev, "Error %d in request cap rsp\n", |
---|
.. | .. |
---|
4023 | 4426 | |
---|
4024 | 4427 | /* Done receiving requested capabilities, query IP offload support */ |
---|
4025 | 4428 | if (atomic_read(&adapter->running_cap_crqs) == 0) { |
---|
4026 | | - union ibmvnic_crq newcrq; |
---|
4027 | | - int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); |
---|
4028 | | - struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = |
---|
4029 | | - &adapter->ip_offload_buf; |
---|
4030 | | - |
---|
4031 | 4429 | adapter->wait_capability = false; |
---|
4032 | | - adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, |
---|
4033 | | - buf_sz, |
---|
4034 | | - DMA_FROM_DEVICE); |
---|
4035 | | - |
---|
4036 | | - if (dma_mapping_error(dev, adapter->ip_offload_tok)) { |
---|
4037 | | - if (!firmware_has_feature(FW_FEATURE_CMO)) |
---|
4038 | | - dev_err(dev, "Couldn't map offload buffer\n"); |
---|
4039 | | - return; |
---|
4040 | | - } |
---|
4041 | | - |
---|
4042 | | - memset(&newcrq, 0, sizeof(newcrq)); |
---|
4043 | | - newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD; |
---|
4044 | | - newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; |
---|
4045 | | - newcrq.query_ip_offload.len = cpu_to_be32(buf_sz); |
---|
4046 | | - newcrq.query_ip_offload.ioba = |
---|
4047 | | - cpu_to_be32(adapter->ip_offload_tok); |
---|
4048 | | - |
---|
4049 | | - ibmvnic_send_crq(adapter, &newcrq); |
---|
| 4430 | + send_query_ip_offload(adapter); |
---|
4050 | 4431 | } |
---|
4051 | 4432 | } |
---|
4052 | 4433 | |
---|
.. | .. |
---|
4057 | 4438 | struct net_device *netdev = adapter->netdev; |
---|
4058 | 4439 | struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; |
---|
4059 | 4440 | struct ibmvnic_login_buffer *login = adapter->login_buf; |
---|
| 4441 | + u64 *tx_handle_array; |
---|
| 4442 | + u64 *rx_handle_array; |
---|
| 4443 | + int num_tx_pools; |
---|
| 4444 | + int num_rx_pools; |
---|
| 4445 | + u64 *size_array; |
---|
| 4446 | + u32 rsp_len; |
---|
4060 | 4447 | int i; |
---|
4061 | 4448 | |
---|
4062 | | - dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, |
---|
4063 | | - DMA_TO_DEVICE); |
---|
4064 | | - dma_unmap_single(dev, adapter->login_rsp_buf_token, |
---|
4065 | | - adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); |
---|
| 4449 | + /* CHECK: Test/set of login_pending does not need to be atomic |
---|
| 4450 | + * because only ibmvnic_tasklet tests/clears this. |
---|
| 4451 | + */ |
---|
| 4452 | + if (!adapter->login_pending) { |
---|
| 4453 | + netdev_warn(netdev, "Ignoring unexpected login response\n"); |
---|
| 4454 | + return 0; |
---|
| 4455 | + } |
---|
| 4456 | + adapter->login_pending = false; |
---|
4066 | 4457 | |
---|
4067 | 4458 | /* If the number of queues requested can't be allocated by the |
---|
4068 | 4459 | * server, the login response will return with code 1. We will need |
---|
.. | .. |
---|
4096 | 4487 | adapter->req_rx_add_queues != |
---|
4097 | 4488 | be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { |
---|
4098 | 4489 | dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); |
---|
4099 | | - ibmvnic_remove(adapter->vdev); |
---|
| 4490 | + ibmvnic_reset(adapter, VNIC_RESET_FATAL); |
---|
4100 | 4491 | return -EIO; |
---|
4101 | 4492 | } |
---|
| 4493 | + |
---|
| 4494 | + rsp_len = be32_to_cpu(login_rsp->len); |
---|
| 4495 | + if (be32_to_cpu(login->login_rsp_len) < rsp_len || |
---|
| 4496 | + rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) || |
---|
| 4497 | + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) || |
---|
| 4498 | + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) || |
---|
| 4499 | + rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) { |
---|
| 4500 | + /* This can happen if a login request times out and there are |
---|
| 4501 | + * 2 outstanding login requests sent, the LOGIN_RSP crq |
---|
| 4502 | + * could have been for the older login request. So we are |
---|
| 4503 | + * parsing the newer response buffer which may be incomplete |
---|
| 4504 | + */ |
---|
| 4505 | + dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n"); |
---|
| 4506 | + ibmvnic_reset(adapter, VNIC_RESET_FATAL); |
---|
| 4507 | + return -EIO; |
---|
| 4508 | + } |
---|
| 4509 | + |
---|
| 4510 | + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
---|
| 4511 | + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); |
---|
| 4512 | + /* variable buffer sizes are not supported, so just read the |
---|
| 4513 | + * first entry. |
---|
| 4514 | + */ |
---|
| 4515 | + adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); |
---|
| 4516 | + |
---|
| 4517 | + num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); |
---|
| 4518 | + num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
---|
| 4519 | + |
---|
| 4520 | + tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
---|
| 4521 | + be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); |
---|
| 4522 | + rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
---|
| 4523 | + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); |
---|
| 4524 | + |
---|
| 4525 | + for (i = 0; i < num_tx_pools; i++) |
---|
| 4526 | + adapter->tx_scrq[i]->handle = tx_handle_array[i]; |
---|
| 4527 | + |
---|
| 4528 | + for (i = 0; i < num_rx_pools; i++) |
---|
| 4529 | + adapter->rx_scrq[i]->handle = rx_handle_array[i]; |
---|
| 4530 | + |
---|
| 4531 | + adapter->num_active_tx_scrqs = num_tx_pools; |
---|
| 4532 | + adapter->num_active_rx_scrqs = num_rx_pools; |
---|
| 4533 | + release_login_rsp_buffer(adapter); |
---|
4102 | 4534 | release_login_buffer(adapter); |
---|
4103 | 4535 | complete(&adapter->init_done); |
---|
4104 | 4536 | |
---|
.. | .. |
---|
4308 | 4740 | out: |
---|
4309 | 4741 | if (atomic_read(&adapter->running_cap_crqs) == 0) { |
---|
4310 | 4742 | adapter->wait_capability = false; |
---|
4311 | | - ibmvnic_send_req_caps(adapter, 0); |
---|
| 4743 | + send_request_cap(adapter, 0); |
---|
4312 | 4744 | } |
---|
| 4745 | +} |
---|
| 4746 | + |
---|
| 4747 | +static int send_query_phys_parms(struct ibmvnic_adapter *adapter) |
---|
| 4748 | +{ |
---|
| 4749 | + union ibmvnic_crq crq; |
---|
| 4750 | + int rc; |
---|
| 4751 | + |
---|
| 4752 | + memset(&crq, 0, sizeof(crq)); |
---|
| 4753 | + crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; |
---|
| 4754 | + crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; |
---|
| 4755 | + |
---|
| 4756 | + mutex_lock(&adapter->fw_lock); |
---|
| 4757 | + adapter->fw_done_rc = 0; |
---|
| 4758 | + reinit_completion(&adapter->fw_done); |
---|
| 4759 | + |
---|
| 4760 | + rc = ibmvnic_send_crq(adapter, &crq); |
---|
| 4761 | + if (rc) { |
---|
| 4762 | + mutex_unlock(&adapter->fw_lock); |
---|
| 4763 | + return rc; |
---|
| 4764 | + } |
---|
| 4765 | + |
---|
| 4766 | + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); |
---|
| 4767 | + if (rc) { |
---|
| 4768 | + mutex_unlock(&adapter->fw_lock); |
---|
| 4769 | + return rc; |
---|
| 4770 | + } |
---|
| 4771 | + |
---|
| 4772 | + mutex_unlock(&adapter->fw_lock); |
---|
| 4773 | + return adapter->fw_done_rc ? -EIO : 0; |
---|
| 4774 | +} |
---|
| 4775 | + |
---|
/* Handle the QUERY_PHYS_PARMS response: translate the server-reported
 * link speed and duplex flags into the ethtool SPEED_*/DUPLEX_* values
 * cached in the adapter (consumed by get_link_ksettings).
 *
 * Return: 0 on success, or the non-zero rc code from the response.
 */
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	/* NOTE(review): the speed field arrives in wire (big-endian)
	 * order; using cpu_to_be32() here only works if the
	 * IBMVNIC_*BPS constants are defined in matching byte order —
	 * confirm against ibmvnic.h.
	 */
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		/* Only warn when the link is actually up; an unknown
		 * speed on a down link is not interesting.
		 */
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
---|
4314 | 4830 | |
---|
4315 | 4831 | static void ibmvnic_handle_crq(union ibmvnic_crq *crq, |
---|
.. | .. |
---|
4330 | 4846 | case IBMVNIC_CRQ_INIT: |
---|
4331 | 4847 | dev_info(dev, "Partner initialized\n"); |
---|
4332 | 4848 | adapter->from_passive_init = true; |
---|
4333 | | - adapter->failover_pending = false; |
---|
| 4849 | + /* Discard any stale login responses from prev reset. |
---|
| 4850 | + * CHECK: should we clear even on INIT_COMPLETE? |
---|
| 4851 | + */ |
---|
| 4852 | + adapter->login_pending = false; |
---|
| 4853 | + |
---|
4334 | 4854 | if (!completion_done(&adapter->init_done)) { |
---|
4335 | 4855 | complete(&adapter->init_done); |
---|
4336 | 4856 | adapter->init_done_rc = -EIO; |
---|
4337 | 4857 | } |
---|
4338 | | - ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); |
---|
| 4858 | + rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); |
---|
| 4859 | + if (rc && rc != -EBUSY) { |
---|
| 4860 | + /* We were unable to schedule the failover |
---|
| 4861 | + * reset either because the adapter was still |
---|
| 4862 | + * probing (eg: during kexec) or we could not |
---|
| 4863 | + * allocate memory. Clear the failover_pending |
---|
| 4864 | + * flag since no one else will. We ignore |
---|
| 4865 | + * EBUSY because it means either FAILOVER reset |
---|
| 4866 | + * is already scheduled or the adapter is |
---|
| 4867 | + * being removed. |
---|
| 4868 | + */ |
---|
| 4869 | + netdev_err(netdev, |
---|
| 4870 | + "Error %ld scheduling failover reset\n", |
---|
| 4871 | + rc); |
---|
| 4872 | + adapter->failover_pending = false; |
---|
| 4873 | + } |
---|
4339 | 4874 | break; |
---|
4340 | 4875 | case IBMVNIC_CRQ_INIT_COMPLETE: |
---|
4341 | 4876 | dev_info(dev, "Partner initialization complete\n"); |
---|
.. | .. |
---|
4349 | 4884 | case IBMVNIC_CRQ_XPORT_EVENT: |
---|
4350 | 4885 | netif_carrier_off(netdev); |
---|
4351 | 4886 | adapter->crq.active = false; |
---|
4352 | | - if (adapter->resetting) |
---|
| 4887 | + /* terminate any thread waiting for a response |
---|
| 4888 | + * from the device |
---|
| 4889 | + */ |
---|
| 4890 | + if (!completion_done(&adapter->fw_done)) { |
---|
| 4891 | + adapter->fw_done_rc = -EIO; |
---|
| 4892 | + complete(&adapter->fw_done); |
---|
| 4893 | + } |
---|
| 4894 | + |
---|
| 4895 | + /* if we got here during crq-init, retry crq-init */ |
---|
| 4896 | + if (!completion_done(&adapter->init_done)) { |
---|
| 4897 | + adapter->init_done_rc = -EAGAIN; |
---|
| 4898 | + complete(&adapter->init_done); |
---|
| 4899 | + } |
---|
| 4900 | + |
---|
| 4901 | + if (!completion_done(&adapter->stats_done)) |
---|
| 4902 | + complete(&adapter->stats_done); |
---|
| 4903 | + if (test_bit(0, &adapter->resetting)) |
---|
4353 | 4904 | adapter->force_reset_recovery = true; |
---|
4354 | 4905 | if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { |
---|
4355 | 4906 | dev_info(dev, "Migrated, re-enabling adapter\n"); |
---|
.. | .. |
---|
4383 | 4934 | be16_to_cpu(crq->version_exchange_rsp.version); |
---|
4384 | 4935 | dev_info(dev, "Partner protocol version is %d\n", |
---|
4385 | 4936 | ibmvnic_version); |
---|
4386 | | - send_cap_queries(adapter); |
---|
| 4937 | + send_query_cap(adapter); |
---|
4387 | 4938 | break; |
---|
4388 | 4939 | case QUERY_CAPABILITY_RSP: |
---|
4389 | 4940 | handle_query_cap_rsp(crq, adapter); |
---|
.. | .. |
---|
4421 | 4972 | crq->link_state_indication.phys_link_state; |
---|
4422 | 4973 | adapter->logical_link_state = |
---|
4423 | 4974 | crq->link_state_indication.logical_link_state; |
---|
| 4975 | + if (adapter->phys_link_state && adapter->logical_link_state) |
---|
| 4976 | + netif_carrier_on(netdev); |
---|
| 4977 | + else |
---|
| 4978 | + netif_carrier_off(netdev); |
---|
4424 | 4979 | break; |
---|
4425 | 4980 | case CHANGE_MAC_ADDR_RSP: |
---|
4426 | 4981 | netdev_dbg(netdev, "Got MAC address change Response\n"); |
---|
.. | .. |
---|
4458 | 5013 | case GET_VPD_RSP: |
---|
4459 | 5014 | handle_vpd_rsp(crq, adapter); |
---|
4460 | 5015 | break; |
---|
| 5016 | + case QUERY_PHYS_PARMS_RSP: |
---|
| 5017 | + adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); |
---|
| 5018 | + complete(&adapter->fw_done); |
---|
| 5019 | + break; |
---|
4461 | 5020 | default: |
---|
4462 | 5021 | netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", |
---|
4463 | 5022 | gen_crq->cmd); |
---|
.. | .. |
---|
4472 | 5031 | return IRQ_HANDLED; |
---|
4473 | 5032 | } |
---|
4474 | 5033 | |
---|
4475 | | -static void ibmvnic_tasklet(void *data) |
---|
| 5034 | +static void ibmvnic_tasklet(struct tasklet_struct *t) |
---|
4476 | 5035 | { |
---|
4477 | | - struct ibmvnic_adapter *adapter = data; |
---|
| 5036 | + struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet); |
---|
4478 | 5037 | struct ibmvnic_crq_queue *queue = &adapter->crq; |
---|
4479 | 5038 | union ibmvnic_crq *crq; |
---|
4480 | 5039 | unsigned long flags; |
---|
.. | .. |
---|
4612 | 5171 | |
---|
4613 | 5172 | retrc = 0; |
---|
4614 | 5173 | |
---|
4615 | | - tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet, |
---|
4616 | | - (unsigned long)adapter); |
---|
| 5174 | + tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet); |
---|
4617 | 5175 | |
---|
4618 | 5176 | netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); |
---|
4619 | | - rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, |
---|
4620 | | - adapter); |
---|
| 5177 | + snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x", |
---|
| 5178 | + adapter->vdev->unit_address); |
---|
| 5179 | + rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); |
---|
4621 | 5180 | if (rc) { |
---|
4622 | 5181 | dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", |
---|
4623 | 5182 | vdev->irq, rc); |
---|
.. | .. |
---|
4651 | 5210 | return retrc; |
---|
4652 | 5211 | } |
---|
4653 | 5212 | |
---|
4654 | | -static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) |
---|
| 5213 | +static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) |
---|
4655 | 5214 | { |
---|
4656 | 5215 | struct device *dev = &adapter->vdev->dev; |
---|
4657 | | - unsigned long timeout = msecs_to_jiffies(30000); |
---|
4658 | | - u64 old_num_rx_queues, old_num_tx_queues; |
---|
| 5216 | + unsigned long timeout = msecs_to_jiffies(20000); |
---|
| 5217 | + u64 old_num_rx_queues = adapter->req_rx_queues; |
---|
| 5218 | + u64 old_num_tx_queues = adapter->req_tx_queues; |
---|
4659 | 5219 | int rc; |
---|
4660 | 5220 | |
---|
4661 | 5221 | adapter->from_passive_init = false; |
---|
4662 | 5222 | |
---|
4663 | | - old_num_rx_queues = adapter->req_rx_queues; |
---|
4664 | | - old_num_tx_queues = adapter->req_tx_queues; |
---|
| 5223 | + if (reset) |
---|
| 5224 | + reinit_completion(&adapter->init_done); |
---|
4665 | 5225 | |
---|
4666 | | - reinit_completion(&adapter->init_done); |
---|
4667 | 5226 | adapter->init_done_rc = 0; |
---|
4668 | | - ibmvnic_send_crq_init(adapter); |
---|
| 5227 | + rc = ibmvnic_send_crq_init(adapter); |
---|
| 5228 | + if (rc) { |
---|
| 5229 | + dev_err(dev, "Send crq init failed with error %d\n", rc); |
---|
| 5230 | + return rc; |
---|
| 5231 | + } |
---|
| 5232 | + |
---|
4669 | 5233 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
---|
4670 | 5234 | dev_err(dev, "Initialization sequence timed out\n"); |
---|
4671 | 5235 | return -1; |
---|
.. | .. |
---|
4682 | 5246 | return -1; |
---|
4683 | 5247 | } |
---|
4684 | 5248 | |
---|
4685 | | - if (adapter->resetting && !adapter->wait_for_reset && |
---|
| 5249 | + if (reset && |
---|
| 5250 | + test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && |
---|
4686 | 5251 | adapter->reset_reason != VNIC_RESET_MOBILITY) { |
---|
4687 | 5252 | if (adapter->req_rx_queues != old_num_rx_queues || |
---|
4688 | 5253 | adapter->req_tx_queues != old_num_tx_queues) { |
---|
4689 | 5254 | release_sub_crqs(adapter, 0); |
---|
4690 | 5255 | rc = init_sub_crqs(adapter); |
---|
4691 | 5256 | } else { |
---|
| 5257 | + /* no need to reinitialize completely, but we do |
---|
| 5258 | + * need to clean up transmits that were in flight |
---|
| 5259 | + * when we processed the reset. Failure to do so |
---|
| 5260 | + * will confound the upper layer, usually TCP, by |
---|
| 5261 | + * creating the illusion of transmits that are |
---|
| 5262 | + * awaiting completion. |
---|
| 5263 | + */ |
---|
| 5264 | + clean_tx_pools(adapter); |
---|
| 5265 | + |
---|
4692 | 5266 | rc = reset_sub_crq_queues(adapter); |
---|
4693 | 5267 | } |
---|
4694 | 5268 | } else { |
---|
4695 | 5269 | rc = init_sub_crqs(adapter); |
---|
4696 | 5270 | } |
---|
4697 | 5271 | |
---|
4698 | | - if (rc) { |
---|
4699 | | - dev_err(dev, "Initialization of sub crqs failed\n"); |
---|
4700 | | - release_crq_queue(adapter); |
---|
4701 | | - return rc; |
---|
4702 | | - } |
---|
4703 | | - |
---|
4704 | | - rc = init_sub_crq_irqs(adapter); |
---|
4705 | | - if (rc) { |
---|
4706 | | - dev_err(dev, "Failed to initialize sub crq irqs\n"); |
---|
4707 | | - release_crq_queue(adapter); |
---|
4708 | | - } |
---|
4709 | | - |
---|
4710 | | - return rc; |
---|
4711 | | -} |
---|
4712 | | - |
---|
4713 | | -static int ibmvnic_init(struct ibmvnic_adapter *adapter) |
---|
4714 | | -{ |
---|
4715 | | - struct device *dev = &adapter->vdev->dev; |
---|
4716 | | - unsigned long timeout = msecs_to_jiffies(30000); |
---|
4717 | | - int rc; |
---|
4718 | | - |
---|
4719 | | - adapter->from_passive_init = false; |
---|
4720 | | - |
---|
4721 | | - adapter->init_done_rc = 0; |
---|
4722 | | - ibmvnic_send_crq_init(adapter); |
---|
4723 | | - if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { |
---|
4724 | | - dev_err(dev, "Initialization sequence timed out\n"); |
---|
4725 | | - return -1; |
---|
4726 | | - } |
---|
4727 | | - |
---|
4728 | | - if (adapter->init_done_rc) { |
---|
4729 | | - release_crq_queue(adapter); |
---|
4730 | | - return adapter->init_done_rc; |
---|
4731 | | - } |
---|
4732 | | - |
---|
4733 | | - if (adapter->from_passive_init) { |
---|
4734 | | - adapter->state = VNIC_OPEN; |
---|
4735 | | - adapter->from_passive_init = false; |
---|
4736 | | - return -1; |
---|
4737 | | - } |
---|
4738 | | - |
---|
4739 | | - rc = init_sub_crqs(adapter); |
---|
4740 | 5272 | if (rc) { |
---|
4741 | 5273 | dev_err(dev, "Initialization of sub crqs failed\n"); |
---|
4742 | 5274 | release_crq_queue(adapter); |
---|
.. | .. |
---|
4783 | 5315 | dev_set_drvdata(&dev->dev, netdev); |
---|
4784 | 5316 | adapter->vdev = dev; |
---|
4785 | 5317 | adapter->netdev = netdev; |
---|
| 5318 | + adapter->login_pending = false; |
---|
4786 | 5319 | |
---|
4787 | 5320 | ether_addr_copy(adapter->mac_addr, mac_addr_p); |
---|
4788 | 5321 | ether_addr_copy(netdev->dev_addr, adapter->mac_addr); |
---|
.. | .. |
---|
4794 | 5327 | spin_lock_init(&adapter->stats_lock); |
---|
4795 | 5328 | |
---|
4796 | 5329 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); |
---|
| 5330 | + INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset, |
---|
| 5331 | + __ibmvnic_delayed_reset); |
---|
4797 | 5332 | INIT_LIST_HEAD(&adapter->rwi_list); |
---|
4798 | 5333 | spin_lock_init(&adapter->rwi_lock); |
---|
| 5334 | + spin_lock_init(&adapter->state_lock); |
---|
| 5335 | + mutex_init(&adapter->fw_lock); |
---|
4799 | 5336 | init_completion(&adapter->init_done); |
---|
4800 | | - adapter->resetting = false; |
---|
4801 | | - |
---|
4802 | | - adapter->mac_change_pending = false; |
---|
| 5337 | + init_completion(&adapter->fw_done); |
---|
| 5338 | + init_completion(&adapter->reset_done); |
---|
| 5339 | + init_completion(&adapter->stats_done); |
---|
| 5340 | + clear_bit(0, &adapter->resetting); |
---|
4803 | 5341 | |
---|
4804 | 5342 | do { |
---|
4805 | 5343 | rc = init_crq_queue(adapter); |
---|
.. | .. |
---|
4809 | 5347 | goto ibmvnic_init_fail; |
---|
4810 | 5348 | } |
---|
4811 | 5349 | |
---|
4812 | | - rc = ibmvnic_init(adapter); |
---|
| 5350 | + rc = ibmvnic_reset_init(adapter, false); |
---|
4813 | 5351 | if (rc && rc != EAGAIN) |
---|
4814 | 5352 | goto ibmvnic_init_fail; |
---|
4815 | 5353 | } while (rc == EAGAIN); |
---|
.. | .. |
---|
4831 | 5369 | goto ibmvnic_dev_file_err; |
---|
4832 | 5370 | |
---|
4833 | 5371 | netif_carrier_off(netdev); |
---|
| 5372 | + |
---|
| 5373 | + adapter->state = VNIC_PROBED; |
---|
| 5374 | + |
---|
| 5375 | + adapter->wait_for_reset = false; |
---|
| 5376 | + adapter->last_reset_time = jiffies; |
---|
| 5377 | + |
---|
4834 | 5378 | rc = register_netdev(netdev); |
---|
4835 | 5379 | if (rc) { |
---|
4836 | 5380 | dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); |
---|
4837 | 5381 | goto ibmvnic_register_fail; |
---|
4838 | 5382 | } |
---|
4839 | 5383 | dev_info(&dev->dev, "ibmvnic registered\n"); |
---|
4840 | | - |
---|
4841 | | - adapter->state = VNIC_PROBED; |
---|
4842 | | - |
---|
4843 | | - adapter->wait_for_reset = false; |
---|
4844 | 5384 | |
---|
4845 | 5385 | return 0; |
---|
4846 | 5386 | |
---|
.. | .. |
---|
4856 | 5396 | ibmvnic_init_fail: |
---|
4857 | 5397 | release_sub_crqs(adapter, 1); |
---|
4858 | 5398 | release_crq_queue(adapter); |
---|
| 5399 | + mutex_destroy(&adapter->fw_lock); |
---|
4859 | 5400 | free_netdev(netdev); |
---|
4860 | 5401 | |
---|
4861 | 5402 | return rc; |
---|
.. | .. |
---|
4865 | 5406 | { |
---|
4866 | 5407 | struct net_device *netdev = dev_get_drvdata(&dev->dev); |
---|
4867 | 5408 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
---|
| 5409 | + unsigned long flags; |
---|
4868 | 5410 | |
---|
| 5411 | + spin_lock_irqsave(&adapter->state_lock, flags); |
---|
| 5412 | + |
---|
| 5413 | + /* If ibmvnic_reset() is scheduling a reset, wait for it to |
---|
| 5414 | + * finish. Then, set the state to REMOVING to prevent it from |
---|
| 5415 | + * scheduling any more work and to have reset functions ignore |
---|
| 5416 | + * any resets that have already been scheduled. Drop the lock |
---|
| 5417 | + * after setting state, so __ibmvnic_reset() which is called |
---|
| 5418 | + * from the flush_work() below, can make progress. |
---|
| 5419 | + */ |
---|
| 5420 | + spin_lock(&adapter->rwi_lock); |
---|
4869 | 5421 | adapter->state = VNIC_REMOVING; |
---|
| 5422 | + spin_unlock(&adapter->rwi_lock); |
---|
| 5423 | + |
---|
| 5424 | + spin_unlock_irqrestore(&adapter->state_lock, flags); |
---|
| 5425 | + |
---|
| 5426 | + flush_work(&adapter->ibmvnic_reset); |
---|
| 5427 | + flush_delayed_work(&adapter->ibmvnic_delayed_reset); |
---|
| 5428 | + |
---|
4870 | 5429 | rtnl_lock(); |
---|
4871 | 5430 | unregister_netdevice(netdev); |
---|
4872 | 5431 | |
---|
.. | .. |
---|
4880 | 5439 | adapter->state = VNIC_REMOVED; |
---|
4881 | 5440 | |
---|
4882 | 5441 | rtnl_unlock(); |
---|
| 5442 | + mutex_destroy(&adapter->fw_lock); |
---|
4883 | 5443 | device_remove_file(&dev->dev, &dev_attr_failover); |
---|
4884 | 5444 | free_netdev(netdev); |
---|
4885 | 5445 | dev_set_drvdata(&dev->dev, NULL); |
---|
.. | .. |
---|
4945 | 5505 | for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) |
---|
4946 | 5506 | ret += 4 * PAGE_SIZE; /* the scrq message queue */ |
---|
4947 | 5507 | |
---|
4948 | | - for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
---|
4949 | | - i++) |
---|
| 5508 | + for (i = 0; i < adapter->num_active_rx_pools; i++) |
---|
4950 | 5509 | ret += adapter->rx_pool[i].size * |
---|
4951 | 5510 | IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); |
---|
4952 | 5511 | |
---|