@@ -23,6 +23,7 @@
 #include <linux/scatterlist.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
+#include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/delay.h>
@@ -126,17 +127,17 @@
 				   struct scsi_cmnd *sc)
 {
 	if (io_req->sgl_list_pa)
-		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
 				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 	scsi_dma_unmap(sc);
 
 	if (io_req->sgl_cnt)
 		mempool_free(io_req->sgl_list_alloc,
 			     fnic->io_sgl_pool[io_req->sgl_type]);
 	if (io_req->sense_buf_pa)
-		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
-				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
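
This hunk (together with the mapping-side hunk further down) is part of the conversion from the legacy PCI DMA wrappers to the generic DMA API: pci_unmap_single(pdev, ...) with PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE becomes dma_unmap_single(&pdev->dev, ...) with DMA_TO_DEVICE/DMA_FROM_DEVICE. A minimal sketch of that one-to-one correspondence, using a hypothetical helper (example_unmap) rather than fnic code:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Unmap a single streaming buffer that was mapped toward the device.
 * The legacy call
 *	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
 * maps directly onto the generic form below.
 */
static void example_unmap(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
}
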
@@ -180,20 +181,19 @@
 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
 			unsigned long clearbits)
 {
-	struct Scsi_Host *host = fnic->lport->host;
-	int sh_locked = spin_is_locked(host->host_lock);
 	unsigned long flags = 0;
+	unsigned long host_lock_flags = 0;
 
-	if (!sh_locked)
-		spin_lock_irqsave(host->host_lock, flags);
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
 
 	if (clearbits)
 		fnic->state_flags &= ~st_flags;
 	else
 		fnic->state_flags |= st_flags;
 
-	if (!sh_locked)
-		spin_unlock_irqrestore(host->host_lock, flags);
+	spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 	return;
 }
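
The old code used spin_is_locked() to guess whether the caller already held the SCSI host lock, which is racy; the new code unconditionally takes fnic_lock and then the host lock, in that fixed order, each with its own saved irq-flags word. A minimal sketch of that nested spin_lock_irqsave() pattern, with hypothetical lock names (outer_lock/inner_lock):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);
static DEFINE_SPINLOCK(inner_lock);

/* Always acquire outer_lock before inner_lock; release in reverse order. */
static void example_update_flags(unsigned long *state, unsigned long bits)
{
	unsigned long outer_flags, inner_flags;

	spin_lock_irqsave(&outer_lock, outer_flags);
	spin_lock_irqsave(&inner_lock, inner_flags);

	*state |= bits;

	spin_unlock_irqrestore(&inner_lock, inner_flags);
	spin_unlock_irqrestore(&outer_lock, outer_flags);
}
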
@@ -276,7 +276,7 @@
 	}
 
 	if (fnic->ctlr.map_dest) {
-		memset(gw_mac, 0xff, ETH_ALEN);
+		eth_broadcast_addr(gw_mac);
 		format = FCPIO_FLOGI_REG_DEF_DEST;
 	} else {
 		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
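
eth_broadcast_addr() from <linux/etherdevice.h> (hence the new include at the top of the file) is the idiomatic replacement for memset(mac, 0xff, ETH_ALEN): it fills a 6-byte MAC with ff:ff:ff:ff:ff:ff. A tiny sketch with a hypothetical local buffer:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

static void example_set_broadcast(void)
{
	u8 mac[ETH_ALEN];

	/* Equivalent to memset(mac, 0xff, ETH_ALEN) */
	eth_broadcast_addr(mac);
}
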
@@ -330,7 +330,6 @@
 	int flags;
 	u8 exch_flags;
 	struct scsi_lun fc_lun;
-	int r;
 
 	if (sg_count) {
 		/* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@
 			desc++;
 		}
 
-		io_req->sgl_list_pa = pci_map_single
-			(fnic->pdev,
-			 io_req->sgl_list,
-			 sizeof(io_req->sgl_list[0]) * sg_count,
-			 PCI_DMA_TODEVICE);
-
-		r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
-		if (r) {
-			printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+				io_req->sgl_list,
+				sizeof(io_req->sgl_list[0]) * sg_count,
+				DMA_TO_DEVICE);
+		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+			printk(KERN_ERR "DMA mapping failed\n");
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	}
 
-	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
 					      sc->sense_buffer,
 					      SCSI_SENSE_BUFFERSIZE,
-					      PCI_DMA_FROMDEVICE);
-
-	r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
-	if (r) {
-		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+					      DMA_FROM_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
 			sizeof(io_req->sgl_list[0]) * sg_count,
-			PCI_DMA_TODEVICE);
-		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+			DMA_TO_DEVICE);
+		printk(KERN_ERR "DMA mapping failed\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
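
The mapping side keeps the essential error check: a handle returned by dma_map_single() must be validated with dma_mapping_error() before it is handed to hardware. A minimal sketch of the map/check/unmap sequence under the generic DMA API, with hypothetical helper names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a buffer toward the device; fail cleanly if the mapping is invalid. */
static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

static void example_unmap_buf(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}
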
@@ -1336,13 +1330,32 @@
 	unsigned int wq_work_done = 0;
 	unsigned int i, cq_index;
 	unsigned int cur_work_done;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
+	u64 start_jiffies = 0;
+	u64 end_jiffies = 0;
+	u64 delta_jiffies = 0;
+	u64 delta_ms = 0;
 
 	for (i = 0; i < fnic->wq_copy_count; i++) {
 		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
+
+		start_jiffies = jiffies;
 		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
 						     fnic_fcpio_cmpl_handler,
 						     copy_work_to_do);
+		end_jiffies = jiffies;
+
 		wq_work_done += cur_work_done;
+		delta_jiffies = end_jiffies - start_jiffies;
+		if (delta_jiffies >
+			(u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
+			atomic64_set(&misc_stats->max_isr_jiffies,
+					delta_jiffies);
+			delta_ms = jiffies_to_msecs(delta_jiffies);
+			atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
+			atomic64_set(&misc_stats->corr_work_done,
+					cur_work_done);
+		}
 	}
 	return wq_work_done;
 }
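
The new bookkeeping records the longest single pass through vnic_cq_copy_service(): the maximum duration in jiffies and in milliseconds, plus the amount of work that pass completed. A minimal, self-contained sketch of this "track the maximum observed duration" pattern, using a hypothetical example_stats structure in place of the driver's misc_stats:

#include <linux/atomic.h>
#include <linux/jiffies.h>

struct example_stats {
	atomic64_t max_jiffies;	/* longest pass, in jiffies */
	atomic64_t max_ms;	/* same value converted to milliseconds */
};

static void example_record_pass(struct example_stats *st, u64 start, u64 end)
{
	u64 delta = end - start;

	/* The two fields are not updated atomically as a pair; like the
	 * driver's version, this is best-effort instrumentation. */
	if (delta > (u64)atomic64_read(&st->max_jiffies)) {
		atomic64_set(&st->max_jiffies, delta);
		atomic64_set(&st->max_ms, jiffies_to_msecs(delta));
	}
}
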
@@ -1389,7 +1402,7 @@
 		}
 		if (!io_req) {
 			spin_unlock_irqrestore(io_lock, flags);
-			goto cleanup_scsi_cmd;
+			continue;
 		}
 
 		CMD_SP(sc) = NULL;
@@ -1404,11 +1417,11 @@
 		fnic_release_ioreq_buf(fnic, io_req, sc);
 		mempool_free(io_req, fnic->io_req_pool);
 
-cleanup_scsi_cmd:
 		sc->result = DID_TRANSPORT_DISRUPTED << 16;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-			      "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
-			      __func__, (jiffies - start_time));
+			      "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+			      __func__, sc->request->tag, sc,
+			      (jiffies - start_time));
 
 		if (atomic64_read(&fnic->io_cmpl_skip))
 			atomic64_dec(&fnic->io_cmpl_skip);
@@ -1417,6 +1430,11 @@
 
 		/* Complete the command to SCSI */
 		if (sc->scsi_done) {
+			if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+				shost_printk(KERN_ERR, fnic->lport->host,
+					"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
+					sc->request->tag, sc);
+
 			FNIC_TRACE(fnic_cleanup_io,
 				   sc->device->host->host_no, i, sc,
 				   jiffies_to_msecs(jiffies - start_time),
@@ -2276,33 +2294,17 @@
 static inline int
 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-	int tag, ret = SCSI_NO_TAG;
+	struct request_queue *q = sc->request->q;
+	struct request *dummy;
 
-	BUG_ON(!bqt);
-	if (!bqt) {
-		pr_err("Tags are not supported\n");
-		goto end;
-	}
+	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+	if (IS_ERR(dummy))
+		return SCSI_NO_TAG;
 
-	do {
-		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
-		if (tag >= bqt->max_depth) {
-			pr_err("Tag allocation failure\n");
-			goto end;
-		}
-	} while (test_and_set_bit(tag, bqt->tag_map));
+	sc->tag = sc->request->tag = dummy->tag;
+	sc->host_scribble = (unsigned char *)dummy;
 
-	bqt->tag_index[tag] = sc->request;
-	sc->request->tag = tag;
-	sc->tag = tag;
-	if (!sc->request->special)
-		sc->request->special = sc;
-
-	ret = tag;
-
-end:
-	return ret;
+	return dummy->tag;
 }
 
 /**
@@ -2312,20 +2314,9 @@
 static inline void
 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-	int tag = sc->request->tag;
+	struct request *dummy = (struct request *)sc->host_scribble;
 
-	if (tag == SCSI_NO_TAG)
-		return;
-
-	BUG_ON(!bqt || !bqt->tag_index[tag]);
-	if (!bqt)
-		return;
-
-	bqt->tag_index[tag] = NULL;
-	clear_bit(tag, bqt->tag_map);
-
-	return;
+	blk_mq_free_request(dummy);
 }
 
 /*
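
With the legacy block-layer tag map (host->bqt) gone, the reset-ioctl path now reserves a tag by allocating a throwaway blk-mq request on the command's queue, borrowing its tag, and stashing the request pointer in sc->host_scribble so fnic_scsi_host_end_tag() can free it again. A minimal sketch of that allocate/borrow/free pattern, using hypothetical wrappers and assuming a kernel of this vintage where struct scsi_cmnd still carries ->request and ->tag:

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <scsi/scsi_cmnd.h>

/* Borrow a tag for a command that did not come through the block layer. */
static int example_start_tag(struct scsi_cmnd *sc)
{
	struct request *dummy;

	dummy = blk_mq_alloc_request(sc->request->q, REQ_OP_WRITE,
				     BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(dummy))
		return -1;	/* caller treats this like SCSI_NO_TAG */

	sc->request->tag = dummy->tag;
	sc->host_scribble = (unsigned char *)dummy;
	return dummy->tag;
}

/* Release the dummy request, returning its tag to the tag set. */
static void example_end_tag(struct scsi_cmnd *sc)
{
	blk_mq_free_request((struct request *)sc->host_scribble);
}
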
@@ -2384,19 +2375,9 @@
 	tag = sc->request->tag;
 	if (unlikely(tag < 0)) {
 		/*
-		 * XXX(hch): current the midlayer fakes up a struct
-		 * request for the explicit reset ioctls, and those
-		 * don't have a tag allocated to them. The below
-		 * code pokes into midlayer structures to paper over
-		 * this design issue, but that won't work for blk-mq.
-		 *
-		 * Either someone who can actually test the hardware
-		 * will have to come up with a similar hack for the
-		 * blk-mq case, or we'll have to bite the bullet and
-		 * fix the way the EH ioctls work for real, but until
-		 * that happens we fail these explicit requests here.
+		 * Really should fix the midlayer to pass in a proper
+		 * request for ioctls...
 		 */
-
 		tag = fnic_scsi_host_start_tag(fnic, sc);
 		if (unlikely(tag == SCSI_NO_TAG))
 			goto fnic_device_reset_end;
@@ -2643,8 +2624,8 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	if (fnic->internal_reset_inprogress == 0) {
-		fnic->internal_reset_inprogress = 1;
+	if (!fnic->internal_reset_inprogress) {
+		fnic->internal_reset_inprogress = true;
 	} else {
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2673,7 +2654,7 @@
 	}
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	fnic->internal_reset_inprogress = 0;
+	fnic->internal_reset_inprogress = false;
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 	return ret;
 }
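
These last two hunks treat internal_reset_inprogress as a bool (which assumes the field's declaration in fnic.h was switched to bool in the same series). The surrounding logic is a lock-protected "only one reset at a time" guard. A minimal sketch of that pattern with hypothetical names (reset_lock, reset_in_progress):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(reset_lock);
static bool reset_in_progress;

/* Claim the reset; returns -EBUSY if another reset is already running. */
static int example_try_start_reset(void)
{
	unsigned long flags;

	spin_lock_irqsave(&reset_lock, flags);
	if (reset_in_progress) {
		spin_unlock_irqrestore(&reset_lock, flags);
		return -EBUSY;
	}
	reset_in_progress = true;
	spin_unlock_irqrestore(&reset_lock, flags);
	return 0;
}

static void example_finish_reset(void)
{
	unsigned long flags;

	spin_lock_irqsave(&reset_lock, flags);
	reset_in_progress = false;
	spin_unlock_irqrestore(&reset_lock, flags);
}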
---|