  .. |   .. |
     |    1 | +// SPDX-License-Identifier: GPL-2.0-only
   1 |    2 |  /* esp_scsi.c: ESP SCSI driver.
   2 |    3 |   *
   3 |    4 |   * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
  .. |   .. |
 242 |  243 |  /* Reset the ESP chip, _not_ the SCSI bus. */
 243 |  244 |  static void esp_reset_esp(struct esp *esp)
 244 |  245 |  {
 245 |      | -	u8 family_code, version;
 246 |      | -
 247 |  246 |  	/* Now reset the ESP chip */
 248 |  247 |  	scsi_esp_cmd(esp, ESP_CMD_RC);
 249 |  248 |  	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
  .. |   .. |
 256 |  255 |  	 */
 257 |  256 |  	esp->max_period = ((35 * esp->ccycle) / 1000);
 258 |  257 |  	if (esp->rev == FAST) {
 259 |      | -		version = esp_read8(ESP_UID);
 260 |      | -		family_code = (version & 0xf8) >> 3;
 261 |      | -		if (family_code == 0x02)
     |  258 | +		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));
     |  259 | +
     |  260 | +		if (family_code == ESP_UID_F236) {
 262 |  261 |  			esp->rev = FAS236;
 263 |      | -		else if (family_code == 0x0a)
     |  262 | +		} else if (family_code == ESP_UID_HME) {
 264 |  263 |  			esp->rev = FASHME; /* Version is usually '5'. */
 265 |      | -		else
     |  264 | +		} else if (family_code == ESP_UID_FSC) {
     |  265 | +			esp->rev = FSC;
     |  266 | +			/* Enable Active Negation */
     |  267 | +			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
     |  268 | +		} else {
 266 |  269 |  			esp->rev = FAS100A;
     |  270 | +		}
 267 |  271 |  		esp->min_period = ((4 * esp->ccycle) / 1000);
 268 |  272 |  	} else {
 269 |  273 |  		esp->min_period = ((5 * esp->ccycle) / 1000);
  .. |   .. |
 303 |  307 |
 304 |  308 |  	case FASHME:
 305 |  309 |  		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
 306 |      | -		/* fallthrough... */
     |  310 | +		fallthrough;
 307 |  311 |
 308 |  312 |  	case FAS236:
 309 |  313 |  	case PCSCSI:
 310 |      | -		/* Fast 236, AM53c974 or HME */
     |  314 | +	case FSC:
 311 |  315 |  		esp_write8(esp->config2, ESP_CFG2);
 312 |  316 |  		if (esp->rev == FASHME) {
 313 |  317 |  			u8 cfg3 = esp->target[0].esp_config3;
  .. |   .. |
 369 |  373 |  {
 370 |  374 |  	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 371 |  375 |  	struct scatterlist *sg = scsi_sglist(cmd);
 372 |      | -	int dir = cmd->sc_data_direction;
 373 |      | -	int total, i;
     |  376 | +	int total = 0, i;
     |  377 | +	struct scatterlist *s;
 374 |  378 |
 375 |      | -	if (dir == DMA_NONE)
     |  379 | +	if (cmd->sc_data_direction == DMA_NONE)
 376 |  380 |  		return;
 377 |  381 |
 378 |      | -	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
 379 |      | -	spriv->cur_residue = sg_dma_len(sg);
 380 |      | -	spriv->cur_sg = sg;
     |  382 | +	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
     |  383 | +		/*
     |  384 | +		 * For pseudo DMA and PIO we need the virtual address instead of
     |  385 | +		 * a dma address, so perform an identity mapping.
     |  386 | +		 */
     |  387 | +		spriv->num_sg = scsi_sg_count(cmd);
 381 |  388 |
 382 |      | -	total = 0;
 383 |      | -	for (i = 0; i < spriv->u.num_sg; i++)
 384 |      | -		total += sg_dma_len(&sg[i]);
     |  389 | +		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
     |  390 | +			s->dma_address = (uintptr_t)sg_virt(s);
     |  391 | +			total += sg_dma_len(s);
     |  392 | +		}
     |  393 | +	} else {
     |  394 | +		spriv->num_sg = scsi_dma_map(cmd);
     |  395 | +		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
     |  396 | +			total += sg_dma_len(s);
     |  397 | +	}
     |  398 | +	spriv->cur_residue = sg_dma_len(sg);
     |  399 | +	spriv->prv_sg = NULL;
     |  400 | +	spriv->cur_sg = sg;
 385 |  401 |  	spriv->tot_residue = total;
 386 |  402 |  }
 387 |  403 |
  .. |   .. |
 434 |  450 |  		p->tot_residue = 0;
 435 |  451 |  	}
 436 |  452 |  	if (!p->cur_residue && p->tot_residue) {
 437 |      | -		p->cur_sg++;
     |  453 | +		p->prv_sg = p->cur_sg;
     |  454 | +		p->cur_sg = sg_next(p->cur_sg);
 438 |  455 |  		p->cur_residue = sg_dma_len(p->cur_sg);
 439 |  456 |  	}
 440 |  457 |  }
 441 |  458 |
 442 |  459 |  static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 443 |  460 |  {
 444 |      | -	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 445 |      | -	int dir = cmd->sc_data_direction;
 446 |      | -
 447 |      | -	if (dir == DMA_NONE)
 448 |      | -		return;
 449 |      | -
 450 |      | -	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
     |  461 | +	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
     |  462 | +		scsi_dma_unmap(cmd);
 451 |  463 |  }
 452 |  464 |
 453 |  465 |  static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
  .. |   .. |
 460 |  472 |  		return;
 461 |  473 |  	}
 462 |  474 |  	ent->saved_cur_residue = spriv->cur_residue;
     |  475 | +	ent->saved_prv_sg = spriv->prv_sg;
 463 |  476 |  	ent->saved_cur_sg = spriv->cur_sg;
 464 |  477 |  	ent->saved_tot_residue = spriv->tot_residue;
 465 |  478 |  }
  .. |   .. |
 474 |  487 |  		return;
 475 |  488 |  	}
 476 |  489 |  	spriv->cur_residue = ent->saved_cur_residue;
     |  490 | +	spriv->prv_sg = ent->saved_prv_sg;
 477 |  491 |  	spriv->cur_sg = ent->saved_cur_sg;
 478 |  492 |  	spriv->tot_residue = ent->saved_tot_residue;
 479 |      | -}
 480 |      | -
 481 |      | -static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
 482 |      | -{
 483 |      | -	if (cmd->cmd_len == 6 ||
 484 |      | -	    cmd->cmd_len == 10 ||
 485 |      | -	    cmd->cmd_len == 12) {
 486 |      | -		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
 487 |      | -	} else {
 488 |      | -		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 489 |      | -	}
 490 |  493 |  }
 491 |  494 |
 492 |  495 |  static void esp_write_tgt_config3(struct esp *esp, int tgt)
  .. |   .. |
 624 |  627 |  	}
 625 |  628 |  }
 626 |  629 |
     |  630 | +static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
     |  631 | +{
     |  632 | +	ent->sense_ptr = ent->cmd->sense_buffer;
     |  633 | +	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
     |  634 | +		ent->sense_dma = (uintptr_t)ent->sense_ptr;
     |  635 | +		return;
     |  636 | +	}
     |  637 | +
     |  638 | +	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
     |  639 | +					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
     |  640 | +}
     |  641 | +
     |  642 | +static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
     |  643 | +{
     |  644 | +	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
     |  645 | +		dma_unmap_single(esp->dev, ent->sense_dma,
     |  646 | +				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
     |  647 | +	ent->sense_ptr = NULL;
     |  648 | +}
     |  649 | +
 627 |  650 |  /* When a contingent allegiance conditon is created, we force feed a
 628 |  651 |   * REQUEST_SENSE command to the device to fetch the sense data. I
 629 |  652 |   * tried many other schemes, relying on the scsi error handling layer
  .. |   .. |
 645 |  668 |  	if (!ent->sense_ptr) {
 646 |  669 |  		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
 647 |  670 |  				  tgt, lun);
 648 |      | -
 649 |      | -		ent->sense_ptr = cmd->sense_buffer;
 650 |      | -		ent->sense_dma = esp->ops->map_single(esp,
 651 |      | -						      ent->sense_ptr,
 652 |      | -						      SCSI_SENSE_BUFFERSIZE,
 653 |      | -						      DMA_FROM_DEVICE);
     |  671 | +		esp_map_sense(esp, ent);
 654 |  672 |  	}
 655 |  673 |  	ent->saved_sense_ptr = ent->sense_ptr;
 656 |  674 |
  .. |   .. |
 717 |  735 |  static void esp_maybe_execute_command(struct esp *esp)
 718 |  736 |  {
 719 |  737 |  	struct esp_target_data *tp;
 720 |      | -	struct esp_lun_data *lp;
 721 |  738 |  	struct scsi_device *dev;
 722 |  739 |  	struct scsi_cmnd *cmd;
 723 |  740 |  	struct esp_cmd_entry *ent;
     |  741 | +	bool select_and_stop = false;
 724 |  742 |  	int tgt, lun, i;
 725 |  743 |  	u32 val, start_cmd;
 726 |  744 |  	u8 *p;
  .. |   .. |
 743 |  761 |  	tgt = dev->id;
 744 |  762 |  	lun = dev->lun;
 745 |  763 |  	tp = &esp->target[tgt];
 746 |      | -	lp = dev->hostdata;
 747 |  764 |
 748 |  765 |  	list_move(&ent->list, &esp->active_cmds);
 749 |  766 |
  .. |   .. |
 752 |  769 |  	esp_map_dma(esp, cmd);
 753 |  770 |  	esp_save_pointers(esp, ent);
 754 |  771 |
 755 |      | -	esp_check_command_len(esp, cmd);
     |  772 | +	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
     |  773 | +		select_and_stop = true;
 756 |  774 |
 757 |  775 |  	p = esp->command_block;
 758 |  776 |
  .. |   .. |
 793 |  811 |  			tp->flags &= ~ESP_TGT_CHECK_NEGO;
 794 |  812 |  		}
 795 |  813 |
 796 |      | -		/* Process it like a slow command. */
 797 |      | -		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
 798 |      | -			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
     |  814 | +		/* If there are multiple message bytes, use Select and Stop */
     |  815 | +		if (esp->msg_out_len)
     |  816 | +			select_and_stop = true;
 799 |  817 |  	}
 800 |  818 |
 801 |  819 |  build_identify:
 802 |      | -	/* If we don't have a lun-data struct yet, we're probing
 803 |      | -	 * so do not disconnect.  Also, do not disconnect unless
 804 |      | -	 * we have a tag on this command.
 805 |      | -	 */
 806 |      | -	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
 807 |      | -		*p++ = IDENTIFY(1, lun);
 808 |      | -	else
 809 |      | -		*p++ = IDENTIFY(0, lun);
     |  820 | +	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
 810 |  821 |
 811 |  822 |  	if (ent->tag[0] && esp->rev == ESP100) {
 812 |  823 |  		/* ESP100 lacks select w/atn3 command, use select
 813 |  824 |  		 * and stop instead.
 814 |  825 |  		 */
 815 |      | -		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
     |  826 | +		select_and_stop = true;
 816 |  827 |  	}
 817 |  828 |
 818 |      | -	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
 819 |      | -		start_cmd = ESP_CMD_SELA;
 820 |      | -		if (ent->tag[0]) {
 821 |      | -			*p++ = ent->tag[0];
 822 |      | -			*p++ = ent->tag[1];
 823 |      | -
 824 |      | -			start_cmd = ESP_CMD_SA3;
 825 |      | -		}
 826 |      | -
 827 |      | -		for (i = 0; i < cmd->cmd_len; i++)
 828 |      | -			*p++ = cmd->cmnd[i];
 829 |      | -
 830 |      | -		esp->select_state = ESP_SELECT_BASIC;
 831 |      | -	} else {
     |  829 | +	if (select_and_stop) {
 832 |  830 |  		esp->cmd_bytes_left = cmd->cmd_len;
 833 |  831 |  		esp->cmd_bytes_ptr = &cmd->cmnd[0];
 834 |  832 |
  .. |   .. |
 843 |  841 |
 844 |  842 |  		start_cmd = ESP_CMD_SELAS;
 845 |  843 |  		esp->select_state = ESP_SELECT_MSGOUT;
     |  844 | +	} else {
     |  845 | +		start_cmd = ESP_CMD_SELA;
     |  846 | +		if (ent->tag[0]) {
     |  847 | +			*p++ = ent->tag[0];
     |  848 | +			*p++ = ent->tag[1];
     |  849 | +
     |  850 | +			start_cmd = ESP_CMD_SA3;
     |  851 | +		}
     |  852 | +
     |  853 | +		for (i = 0; i < cmd->cmd_len; i++)
     |  854 | +			*p++ = cmd->cmnd[i];
     |  855 | +
     |  856 | +		esp->select_state = ESP_SELECT_BASIC;
 846 |  857 |  	}
 847 |  858 |  	val = tgt;
 848 |  859 |  	if (esp->rev == FASHME)
  .. |   .. |
 902 |  913 |  	}
 903 |  914 |
 904 |  915 |  	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 905 |      | -		esp->ops->unmap_single(esp, ent->sense_dma,
 906 |      | -				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 907 |      | -		ent->sense_ptr = NULL;
     |  916 | +		esp_unmap_sense(esp, ent);
 908 |  917 |
 909 |  918 |  		/* Restore the message/status bytes to what we actually
 910 |  919 |  		 * saw originally. Also, report that we are providing
  .. |   .. |
 965 |  974 |  	cmd->scsi_done = done;
 966 |  975 |
 967 |  976 |  	spriv = ESP_CMD_PRIV(cmd);
 968 |      | -	spriv->u.dma_addr = ~(dma_addr_t)0x0;
     |  977 | +	spriv->num_sg = 0;
 969 |  978 |
 970 |  979 |  	list_add_tail(&ent->list, &esp->queued_cmds);
 971 |  980 |
  .. |   .. |
1032 | 1041 |
1033 | 1042 |  static void esp_schedule_reset(struct esp *esp)
1034 | 1043 |  {
1035 |      | -	esp_log_reset("esp_schedule_reset() from %pf\n",
     | 1044 | +	esp_log_reset("esp_schedule_reset() from %ps\n",
1036 | 1045 |  		      __builtin_return_address(0));
1037 | 1046 |  	esp->flags |= ESP_FLAG_RESETTING;
1038 | 1047 |  	esp_event(esp, ESP_EVENT_RESET);
  .. |   .. |
1252 | 1261 |  			esp_unmap_dma(esp, cmd);
1253 | 1262 |  			esp_free_lun_tag(ent, cmd->device->hostdata);
1254 | 1263 |  			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1255 |      | -			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1256 | 1264 |  			esp->cmd_bytes_ptr = NULL;
1257 | 1265 |  			esp->cmd_bytes_left = 0;
1258 | 1266 |  		} else {
1259 |      | -			esp->ops->unmap_single(esp, ent->sense_dma,
1260 |      | -					       SCSI_SENSE_BUFFERSIZE,
1261 |      | -					       DMA_FROM_DEVICE);
1262 |      | -			ent->sense_ptr = NULL;
     | 1267 | +			esp_unmap_sense(esp, ent);
1263 | 1268 |  		}
1264 | 1269 |
1265 | 1270 |  		/* Now that the state is unwound properly, put back onto
  .. |   .. |
1303 | 1308 |  		esp_flush_fifo(esp);
1304 | 1309 |  	}
1305 | 1310 |
1306 |      | -	/* If we are doing a slow command, negotiation, etc.
1307 |      | -	 * we'll do the right thing as we transition to the
1308 |      | -	 * next phase.
     | 1311 | +	/* If we are doing a Select And Stop command, negotiation, etc.
     | 1312 | +	 * we'll do the right thing as we transition to the next phase.
1309 | 1313 |  	 */
1310 | 1314 |  	esp_event(esp, ESP_EVENT_CHECK_PHASE);
1311 | 1315 |  	return 0;
  .. |   .. |
1359 | 1363 |  	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
1360 | 1364 |  	u8 *ptr;
1361 | 1365 |
1362 |      | -	ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
     | 1366 | +	ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
1363 | 1367 |  				  &offset, &count);
1364 | 1368 |  	if (likely(ptr)) {
1365 | 1369 |  		*(ptr + offset) = bval;
  .. |   .. |
1652 | 1656 |  	spriv = ESP_CMD_PRIV(ent->cmd);
1653 | 1657 |
1654 | 1658 |  	if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
1655 |      | -		spriv->cur_sg--;
     | 1659 | +		spriv->cur_sg = spriv->prv_sg;
1656 | 1660 |  		spriv->cur_residue = 1;
1657 | 1661 |  	} else
1658 | 1662 |  		spriv->cur_residue++;
  .. |   .. |
1737 | 1741 |
1738 | 1742 |  	case ESP_EVENT_DATA_IN:
1739 | 1743 |  		write = 1;
1740 |      | -		/* fallthru */
     | 1744 | +		fallthrough;
1741 | 1745 |
1742 | 1746 |  	case ESP_EVENT_DATA_OUT: {
1743 | 1747 |  		struct esp_cmd_entry *ent = esp->active_cmd;
  .. |   .. |
2040 | 2044 |  		esp_free_lun_tag(ent, cmd->device->hostdata);
2041 | 2045 |  		cmd->result = DID_RESET << 16;
2042 | 2046 |
2043 |      | -		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
2044 |      | -			esp->ops->unmap_single(esp, ent->sense_dma,
2045 |      | -					       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
2046 |      | -			ent->sense_ptr = NULL;
2047 |      | -		}
     | 2047 | +		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
     | 2048 | +			esp_unmap_sense(esp, ent);
2048 | 2049 |
2049 | 2050 |  		cmd->scsi_done(cmd);
2050 | 2051 |  		list_del(&ent->list);
  .. |   .. |
2375 | 2376 |  	"ESP100A",
2376 | 2377 |  	"ESP236",
2377 | 2378 |  	"FAS236",
     | 2379 | +	"AM53C974",
     | 2380 | +	"53CF9x-2",
2378 | 2381 |  	"FAS100A",
2379 | 2382 |  	"FAST",
2380 | 2383 |  	"FASHME",
2381 |      | -	"AM53C974",
2382 | 2384 |  };
2383 | 2385 |
2384 | 2386 |  static struct scsi_transport_template *esp_transport_template;
2385 | 2387 |
2386 |      | -int scsi_esp_register(struct esp *esp, struct device *dev)
     | 2388 | +int scsi_esp_register(struct esp *esp)
2387 | 2389 |  {
2388 | 2390 |  	static int instance;
2389 | 2391 |  	int err;
  .. |   .. |
2403 | 2405 |
2404 | 2406 |  	esp_bootup_reset(esp);
2405 | 2407 |
2406 |      | -	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
     | 2408 | +	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2407 | 2409 |  		   esp->host->unique_id, esp->regs, esp->dma_regs,
2408 | 2410 |  		   esp->host->irq);
2409 |      | -	dev_printk(KERN_INFO, dev,
     | 2411 | +	dev_printk(KERN_INFO, esp->dev,
2410 | 2412 |  		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2411 | 2413 |  		   esp->host->unique_id, esp_chip_names[esp->rev],
2412 | 2414 |  		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
  .. |   .. |
2414 | 2416 |  	/* Let the SCSI bus reset settle. */
2415 | 2417 |  	ssleep(esp_bus_reset_settle);
2416 | 2418 |
2417 |      | -	err = scsi_add_host(esp->host, dev);
     | 2419 | +	err = scsi_add_host(esp->host, esp->dev);
2418 | 2420 |  	if (err)
2419 | 2421 |  		return err;
2420 | 2422 |
  .. |   .. |
2685 | 2687 |  	.can_queue		= 7,
2686 | 2688 |  	.this_id		= 7,
2687 | 2689 |  	.sg_tablesize		= SG_ALL,
2688 |      | -	.use_clustering		= ENABLE_CLUSTERING,
2689 | 2690 |  	.max_sectors		= 0xffff,
2690 | 2691 |  	.skip_settle_delay	= 1,
2691 | 2692 |  };
  .. |   .. |
2791 | 2792 |
2792 | 2793 |  module_init(esp_init);
2793 | 2794 |  module_exit(esp_exit);
     | 2795 | +
     | 2796 | +#ifdef CONFIG_SCSI_ESP_PIO
     | 2797 | +static inline unsigned int esp_wait_for_fifo(struct esp *esp)
     | 2798 | +{
     | 2799 | +	int i = 500000;
     | 2800 | +
     | 2801 | +	do {
     | 2802 | +		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
     | 2803 | +
     | 2804 | +		if (fbytes)
     | 2805 | +			return fbytes;
     | 2806 | +
     | 2807 | +		udelay(1);
     | 2808 | +	} while (--i);
     | 2809 | +
     | 2810 | +	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
     | 2811 | +		     esp_read8(ESP_STATUS));
     | 2812 | +	return 0;
     | 2813 | +}
     | 2814 | +
     | 2815 | +static inline int esp_wait_for_intr(struct esp *esp)
     | 2816 | +{
     | 2817 | +	int i = 500000;
     | 2818 | +
     | 2819 | +	do {
     | 2820 | +		esp->sreg = esp_read8(ESP_STATUS);
     | 2821 | +		if (esp->sreg & ESP_STAT_INTR)
     | 2822 | +			return 0;
     | 2823 | +
     | 2824 | +		udelay(1);
     | 2825 | +	} while (--i);
     | 2826 | +
     | 2827 | +	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
     | 2828 | +		     esp->sreg);
     | 2829 | +	return 1;
     | 2830 | +}
     | 2831 | +
     | 2832 | +#define ESP_FIFO_SIZE 16
     | 2833 | +
     | 2834 | +void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
     | 2835 | +		      u32 dma_count, int write, u8 cmd)
     | 2836 | +{
     | 2837 | +	u8 phase = esp->sreg & ESP_STAT_PMASK;
     | 2838 | +
     | 2839 | +	cmd &= ~ESP_CMD_DMA;
     | 2840 | +	esp->send_cmd_error = 0;
     | 2841 | +
     | 2842 | +	if (write) {
     | 2843 | +		u8 *dst = (u8 *)addr;
     | 2844 | +		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
     | 2845 | +
     | 2846 | +		scsi_esp_cmd(esp, cmd);
     | 2847 | +
     | 2848 | +		while (1) {
     | 2849 | +			if (!esp_wait_for_fifo(esp))
     | 2850 | +				break;
     | 2851 | +
     | 2852 | +			*dst++ = readb(esp->fifo_reg);
     | 2853 | +			--esp_count;
     | 2854 | +
     | 2855 | +			if (!esp_count)
     | 2856 | +				break;
     | 2857 | +
     | 2858 | +			if (esp_wait_for_intr(esp)) {
     | 2859 | +				esp->send_cmd_error = 1;
     | 2860 | +				break;
     | 2861 | +			}
     | 2862 | +
     | 2863 | +			if ((esp->sreg & ESP_STAT_PMASK) != phase)
     | 2864 | +				break;
     | 2865 | +
     | 2866 | +			esp->ireg = esp_read8(ESP_INTRPT);
     | 2867 | +			if (esp->ireg & mask) {
     | 2868 | +				esp->send_cmd_error = 1;
     | 2869 | +				break;
     | 2870 | +			}
     | 2871 | +
     | 2872 | +			if (phase == ESP_MIP)
     | 2873 | +				esp_write8(ESP_CMD_MOK, ESP_CMD);
     | 2874 | +
     | 2875 | +			esp_write8(ESP_CMD_TI, ESP_CMD);
     | 2876 | +		}
     | 2877 | +	} else {
     | 2878 | +		unsigned int n = ESP_FIFO_SIZE;
     | 2879 | +		u8 *src = (u8 *)addr;
     | 2880 | +
     | 2881 | +		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
     | 2882 | +
     | 2883 | +		if (n > esp_count)
     | 2884 | +			n = esp_count;
     | 2885 | +		writesb(esp->fifo_reg, src, n);
     | 2886 | +		src += n;
     | 2887 | +		esp_count -= n;
     | 2888 | +
     | 2889 | +		scsi_esp_cmd(esp, cmd);
     | 2890 | +
     | 2891 | +		while (esp_count) {
     | 2892 | +			if (esp_wait_for_intr(esp)) {
     | 2893 | +				esp->send_cmd_error = 1;
     | 2894 | +				break;
     | 2895 | +			}
     | 2896 | +
     | 2897 | +			if ((esp->sreg & ESP_STAT_PMASK) != phase)
     | 2898 | +				break;
     | 2899 | +
     | 2900 | +			esp->ireg = esp_read8(ESP_INTRPT);
     | 2901 | +			if (esp->ireg & ~ESP_INTR_BSERV) {
     | 2902 | +				esp->send_cmd_error = 1;
     | 2903 | +				break;
     | 2904 | +			}
     | 2905 | +
     | 2906 | +			n = ESP_FIFO_SIZE -
     | 2907 | +			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
     | 2908 | +
     | 2909 | +			if (n > esp_count)
     | 2910 | +				n = esp_count;
     | 2911 | +			writesb(esp->fifo_reg, src, n);
     | 2912 | +			src += n;
     | 2913 | +			esp_count -= n;
     | 2914 | +
     | 2915 | +			esp_write8(ESP_CMD_TI, ESP_CMD);
     | 2916 | +		}
     | 2917 | +	}
     | 2918 | +
     | 2919 | +	esp->send_cmd_residual = esp_count;
     | 2920 | +}
     | 2921 | +EXPORT_SYMBOL(esp_send_pio_cmd);
     | 2922 | +#endif