forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/scsi/cxgbi/libcxgbi.c
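
The first hunk below drops the driver-private cxgbi_alloc_big_mem()/cxgbi_free_big_mem() helpers in favour of the generic kvzalloc(array_size(...))/kvfree() pair. As a minimal sketch only (not part of the patch; demo_alloc_portmap() and demo_free_portmap() are hypothetical names used for illustration), the allocation pattern it adopts looks like this:

#include <linux/mm.h>       /* kvzalloc(), kvfree() */
#include <linux/overflow.h> /* array_size() */

/* Hypothetical demo of the pattern adopted by the patch: array_size()
 * saturates on multiplication overflow, and kvzalloc() falls back to
 * vmalloc() when a large physically contiguous buffer is unavailable.
 */
static void **demo_alloc_portmap(unsigned int max_conn)
{
        return kvzalloc(array_size(max_conn, sizeof(void *)),
                        GFP_KERNEL | __GFP_NOWARN);
}

static void demo_free_portmap(void **map)
{
        kvfree(map);    /* handles both kmalloc- and vmalloc-backed memory */
}
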
@@ -77,9 +77,9 @@
 {
         struct cxgbi_ports_map *pmap = &cdev->pmap;
 
-        pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
-                                             sizeof(struct cxgbi_sock *),
-                                             GFP_KERNEL);
+        pmap->port_csk = kvzalloc(array_size(max_conn,
+                                             sizeof(struct cxgbi_sock *)),
+                                  GFP_KERNEL | __GFP_NOWARN);
         if (!pmap->port_csk) {
                 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
                 return -ENOMEM;
@@ -124,7 +124,7 @@
         if (cdev->cdev2ppm)
                 cxgbi_ppm_release(cdev->cdev2ppm(cdev));
         if (cdev->pmap.max_connect)
-                cxgbi_free_big_mem(cdev->pmap.port_csk);
+                kvfree(cdev->pmap.port_csk);
         kfree(cdev);
 }
 
@@ -283,7 +283,6 @@
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
 
-#if IS_ENABLED(CONFIG_IPV6)
 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
                                                      int *port)
 {
@@ -316,7 +315,6 @@
                   ndev, ndev->name);
         return NULL;
 }
-#endif
 
 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
 {
@@ -361,12 +359,14 @@
                 shost->max_lun = max_lun;
                 shost->max_id = max_conns - 1;
                 shost->max_channel = 0;
-                shost->max_cmd_len = 16;
+                shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 
                 chba = iscsi_host_priv(shost);
                 chba->cdev = cdev;
                 chba->ndev = cdev->ports[i];
                 chba->shost = shost;
+
+                shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX;
 
                 log_debug(1 << CXGBI_DBG_DEV,
                           "cdev 0x%p, p#%d %s: chba 0x%p.\n",
@@ -658,6 +658,8 @@
         }
 
         cdev = cxgbi_device_find_by_netdev(ndev, &port);
+        if (!cdev)
+                cdev = cxgbi_device_find_by_mac(ndev, &port);
         if (!cdev) {
                 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
                         &daddr->sin_addr.s_addr, ndev->name);
@@ -790,7 +792,8 @@
         csk->mtu = mtu;
         csk->dst = dst;
 
-        if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
+        rt6_get_prefsrc(rt, &pref_saddr);
+        if (ipv6_addr_any(&pref_saddr)) {
                 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
 
                 err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
@@ -800,8 +803,6 @@
                                  &daddr6->sin6_addr);
                         goto rel_rt;
                 }
-        } else {
-                pref_saddr = rt->rt6i_prefsrc.addr;
         }
 
         csk->csk_family = AF_INET6;
@@ -1137,88 +1138,12 @@
 }
 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
 
-static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
-{
-        struct cxgbi_device *cdev = csk->cdev;
-        struct sk_buff *next;
-        int err, copied = 0;
-
-        spin_lock_bh(&csk->lock);
-
-        if (csk->state != CTP_ESTABLISHED) {
-                log_debug(1 << CXGBI_DBG_PDU_TX,
-                          "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
-                          csk, csk->state, csk->flags, csk->tid);
-                err = -EAGAIN;
-                goto out_err;
-        }
-
-        if (csk->err) {
-                log_debug(1 << CXGBI_DBG_PDU_TX,
-                          "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
-                          csk, csk->state, csk->flags, csk->tid, csk->err);
-                err = -EPIPE;
-                goto out_err;
-        }
-
-        if (csk->write_seq - csk->snd_una >= csk->snd_win) {
-                log_debug(1 << CXGBI_DBG_PDU_TX,
-                          "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
-                          csk, csk->state, csk->flags, csk->tid, csk->write_seq,
-                          csk->snd_una, csk->snd_win);
-                err = -ENOBUFS;
-                goto out_err;
-        }
-
-        while (skb) {
-                int frags = skb_shinfo(skb)->nr_frags +
-                                (skb->len != skb->data_len);
-
-                if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
-                        pr_err("csk 0x%p, skb head %u < %u.\n",
-                               csk, skb_headroom(skb), cdev->skb_tx_rsvd);
-                        err = -EINVAL;
-                        goto out_err;
-                }
-
-                if (frags >= SKB_WR_LIST_SIZE) {
-                        pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
-                               csk, skb_shinfo(skb)->nr_frags, skb->len,
-                               skb->data_len, (uint)(SKB_WR_LIST_SIZE));
-                        err = -EINVAL;
-                        goto out_err;
-                }
-
-                next = skb->next;
-                skb->next = NULL;
-                cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
-                cxgbi_sock_skb_entail(csk, skb);
-                copied += skb->len;
-                csk->write_seq += skb->len +
-                                  cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
-                skb = next;
-        }
-
-        if (likely(skb_queue_len(&csk->write_queue)))
-                cdev->csk_push_tx_frames(csk, 1);
-done:
-        spin_unlock_bh(&csk->lock);
-        return copied;
-
-out_err:
-        if (copied == 0 && err == -EPIPE)
-                copied = csk->err ? csk->err : -EPIPE;
-        else
-                copied = err;
-        goto done;
-}
-
 static inline void
 scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
                 unsigned int *sgcnt, unsigned int *dlen,
                 unsigned int prot)
 {
-        struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc);
+        struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb;
 
         *sgl = sdb->table.sgl;
         *sgcnt = sdb->table.nents;
@@ -1285,16 +1210,15 @@
  * APIs interacting with open-iscsi libraries
  */
 
-static unsigned char padding[4];
-
-void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
-                         struct cxgbi_tag_format *tformat, unsigned int ppmax,
-                         unsigned int llimit, unsigned int start,
-                         unsigned int rsvd_factor)
+int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
+                        struct cxgbi_tag_format *tformat,
+                        unsigned int iscsi_size, unsigned int llimit,
+                        unsigned int start, unsigned int rsvd_factor,
+                        unsigned int edram_start, unsigned int edram_size)
 {
         int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
-                                cdev->lldev, tformat, ppmax, llimit, start,
-                                rsvd_factor);
+                                cdev->lldev, tformat, iscsi_size, llimit, start,
+                                rsvd_factor, edram_start, edram_size);
 
         if (err >= 0) {
                 struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
@@ -1306,6 +1230,8 @@
         } else {
                 cdev->flags |= CXGBI_FLAG_DDP_OFF;
         }
+
+        return err;
 }
 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
 
@@ -1434,8 +1360,7 @@
         log_debug(1 << CXGBI_DBG_DDP,
                   "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
                   cdev, task, tag);
-        if (sc &&
-            (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+        if (sc && sc->sc_data_direction == DMA_FROM_DEVICE &&
             cxgbi_ppm_is_ddp_tag(ppm, tag)) {
                 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
                 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
@@ -1467,9 +1392,7 @@
         u32 tag = 0;
         int err = -EINVAL;
 
-        if (sc &&
-            (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)
-            ) {
+        if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
                 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
                 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
 
@@ -1834,9 +1757,10 @@
         return -EFAULT;
 }
 
-static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
-                                unsigned int dlen, struct page_frag *frags,
-                                int frag_max)
+static int
+sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
+                  unsigned int dlen, struct page_frag *frags,
+                  int frag_max, u32 *dlimit)
 {
         unsigned int datalen = dlen;
         unsigned int sglen = sg->length - sgoffset;
@@ -1868,6 +1792,7 @@
                 if (i >= frag_max) {
                         pr_warn("too many pages %u, dlen %u.\n",
                                 frag_max, dlen);
+                        *dlimit = dlen - datalen;
                         return -EINVAL;
                 }
 
@@ -1884,38 +1809,220 @@
         return i;
 }
 
-int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
+static void cxgbi_task_data_sgl_check(struct iscsi_task *task)
 {
-        struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+        struct scsi_cmnd *sc = task->sc;
+        struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+        struct scatterlist *sg, *sgl = NULL;
+        u32 sgcnt = 0;
+        int i;
+
+        tdata->flags = CXGBI_TASK_SGL_CHECKED;
+        if (!sc)
+                return;
+
+        scmd_get_params(sc, &sgl, &sgcnt, &tdata->dlen, 0);
+        if (!sgl || !sgcnt) {
+                tdata->flags |= CXGBI_TASK_SGL_COPY;
+                return;
+        }
+
+        for_each_sg(sgl, sg, sgcnt, i) {
+                if (page_count(sg_page(sg)) < 1) {
+                        tdata->flags |= CXGBI_TASK_SGL_COPY;
+                        return;
+                }
+        }
+}
+
+static int
+cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count,
+                         u32 *dlimit)
+{
+        struct scsi_cmnd *sc = task->sc;
+        struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+        struct scatterlist *sgl = NULL;
+        struct scatterlist *sg;
+        u32 dlen = 0;
+        u32 sgcnt;
+        int err;
+
+        if (!sc)
+                return 0;
+
+        scmd_get_params(sc, &sgl, &sgcnt, &dlen, 0);
+        if (!sgl || !sgcnt)
+                return 0;
+
+        err = sgl_seek_offset(sgl, sgcnt, offset, &tdata->sgoffset, &sg);
+        if (err < 0) {
+                pr_warn("tpdu max, sgl %u, bad offset %u/%u.\n",
+                        sgcnt, offset, tdata->dlen);
+                return err;
+        }
+        err = sgl_read_to_frags(sg, tdata->sgoffset, count,
+                                tdata->frags, MAX_SKB_FRAGS, dlimit);
+        if (err < 0) {
+                log_debug(1 << CXGBI_DBG_ISCSI,
+                          "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n",
+                          sgcnt, offset, count, tdata->dlen, *dlimit);
+                return err;
+        }
+        tdata->offset = offset;
+        tdata->count = count;
+        tdata->nr_frags = err;
+        tdata->total_count = count;
+        tdata->total_offset = offset;
+
+        log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+                  "%s: offset %u, count %u,\n"
+                  "err %u, total_count %u, total_offset %u\n",
+                  __func__, offset, count, err, tdata->total_count, tdata->total_offset);
+
+        return 0;
+}
+
+int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op)
+{
+        struct iscsi_conn *conn = task->conn;
+        struct iscsi_session *session = task->conn->session;
+        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
         struct cxgbi_conn *cconn = tcp_conn->dd_data;
         struct cxgbi_device *cdev = cconn->chba->cdev;
-        struct iscsi_conn *conn = task->conn;
+        struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL;
         struct iscsi_tcp_task *tcp_task = task->dd_data;
         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
         struct scsi_cmnd *sc = task->sc;
-        struct cxgbi_sock *csk = cconn->cep->csk;
-        struct net_device *ndev = cdev->ports[csk->port_id];
-        int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
+        u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
+        u32 max_txdata_len = conn->max_xmit_dlength;
+        u32 iso_tx_rsvd = 0, local_iso_info = 0;
+        u32 last_tdata_offset, last_tdata_count;
+        int err = 0;
+
+        if (!tcp_task) {
+                pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n",
+                       task, tcp_task, tdata);
+                return -ENOMEM;
+        }
+        if (!csk) {
+                pr_err("task 0x%p, csk gone.\n", task);
+                return -EPIPE;
+        }
+
+        op &= ISCSI_OPCODE_MASK;
 
         tcp_task->dd_data = tdata;
         task->hdr = NULL;
 
-        if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
-            (opcode == ISCSI_OP_SCSI_DATA_OUT ||
-             (opcode == ISCSI_OP_SCSI_CMD &&
-              (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
-                /* data could goes into skb head */
-                headroom += min_t(unsigned int,
-                                SKB_MAX_HEAD(cdev->skb_tx_rsvd),
-                                conn->max_xmit_dlength);
+        last_tdata_count = tdata->count;
+        last_tdata_offset = tdata->offset;
 
-        tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
-        if (!tdata->skb) {
-                ndev->stats.tx_dropped++;
-                return -ENOMEM;
+        if ((op == ISCSI_OP_SCSI_DATA_OUT) ||
+            ((op == ISCSI_OP_SCSI_CMD) &&
+             (sc->sc_data_direction == DMA_TO_DEVICE))) {
+                u32 remaining_data_tosend, dlimit = 0;
+                u32 max_pdu_size, max_num_pdu, num_pdu;
+                u32 count;
+
+                /* Preserve conn->max_xmit_dlength because it can get updated to
+                 * ISO data size.
+                 */
+                if (task->state == ISCSI_TASK_PENDING)
+                        tdata->max_xmit_dlength = conn->max_xmit_dlength;
+
+                if (!tdata->offset)
+                        cxgbi_task_data_sgl_check(task);
+
+                remaining_data_tosend =
+                        tdata->dlen - tdata->offset - tdata->count;
+
+recalculate_sgl:
+                max_txdata_len = tdata->max_xmit_dlength;
+                log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+                          "tdata->dlen %u, remaining to send %u "
+                          "conn->max_xmit_dlength %u, "
+                          "tdata->max_xmit_dlength %u\n",
+                          tdata->dlen, remaining_data_tosend,
+                          conn->max_xmit_dlength, tdata->max_xmit_dlength);
+
+                if (cdev->skb_iso_txhdr && !csk->disable_iso &&
+                    (remaining_data_tosend > tdata->max_xmit_dlength) &&
+                    !(remaining_data_tosend % 4)) {
+                        u32 max_iso_data;
+
+                        if ((op == ISCSI_OP_SCSI_CMD) &&
+                            session->initial_r2t_en)
+                                goto no_iso;
+
+                        max_pdu_size = tdata->max_xmit_dlength +
+                                       ISCSI_PDU_NONPAYLOAD_LEN;
+                        max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB,
+                                                 csk->advmss);
+                        max_num_pdu = max_iso_data / max_pdu_size;
+
+                        num_pdu = (remaining_data_tosend +
+                                   tdata->max_xmit_dlength - 1) /
+                                  tdata->max_xmit_dlength;
+
+                        if (num_pdu > max_num_pdu)
+                                num_pdu = max_num_pdu;
+
+                        conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu;
+                        max_txdata_len = conn->max_xmit_dlength;
+                        iso_tx_rsvd = cdev->skb_iso_txhdr;
+                        local_iso_info = sizeof(struct cxgbi_iso_info);
+
+                        log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+                                  "max_pdu_size %u, max_num_pdu %u, "
+                                  "max_txdata %u, num_pdu %u\n",
+                                  max_pdu_size, max_num_pdu,
+                                  max_txdata_len, num_pdu);
+                }
+no_iso:
+                count = min_t(u32, max_txdata_len, remaining_data_tosend);
+                err = cxgbi_task_data_sgl_read(task,
+                                               tdata->offset + tdata->count,
+                                               count, &dlimit);
+                if (unlikely(err < 0)) {
+                        log_debug(1 << CXGBI_DBG_ISCSI,
+                                  "task 0x%p, tcp_task 0x%p, tdata 0x%p, "
+                                  "sgl err %d, count %u, dlimit %u\n",
+                                  task, tcp_task, tdata, err, count, dlimit);
+                        if (dlimit) {
+                                remaining_data_tosend =
+                                        rounddown(dlimit,
+                                                  tdata->max_xmit_dlength);
+                                if (!remaining_data_tosend)
+                                        remaining_data_tosend = dlimit;
+
+                                dlimit = 0;
+
+                                conn->max_xmit_dlength = remaining_data_tosend;
+                                goto recalculate_sgl;
+                        }
+
+                        pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, "
+                               "sgl err %d\n",
+                               task, tcp_task, tdata, err);
+                        goto ret_err;
+                }
+
+                if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
+                    (tdata->nr_frags > MAX_SKB_FRAGS))
+                        headroom += conn->max_xmit_dlength;
         }
 
-        skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
+        tdata->skb = alloc_skb(local_iso_info + cdev->skb_tx_rsvd +
+                               iso_tx_rsvd + headroom, GFP_ATOMIC);
+        if (!tdata->skb) {
+                tdata->count = last_tdata_count;
+                tdata->offset = last_tdata_offset;
+                err = -ENOMEM;
+                goto ret_err;
+        }
+
+        skb_reserve(tdata->skb, local_iso_info + cdev->skb_tx_rsvd +
+                    iso_tx_rsvd);
 
         if (task->sc) {
                 task->hdr = (struct iscsi_hdr *)tdata->skb->data;
@@ -1924,24 +2031,99 @@
         if (!task->hdr) {
                 __kfree_skb(tdata->skb);
                 tdata->skb = NULL;
-                ndev->stats.tx_dropped++;
                 return -ENOMEM;
         }
 }
-        task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
+
+        task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX;
+
+        if (iso_tx_rsvd)
+                cxgbi_skcb_set_flag(tdata->skb, SKCBF_TX_ISO);
 
         /* data_out uses scsi_cmd's itt */
-        if (opcode != ISCSI_OP_SCSI_DATA_OUT)
+        if (op != ISCSI_OP_SCSI_DATA_OUT)
                 task_reserve_itt(task, &task->hdr->itt);
 
         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-                "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
-                task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
-                conn->max_xmit_dlength, ntohl(task->hdr->itt));
+                  "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
+                  task, op, tdata->skb, cdev->skb_tx_rsvd, headroom,
+                  conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt));
 
         return 0;
+
+ret_err:
+        conn->max_xmit_dlength = tdata->max_xmit_dlength;
+        return err;
 }
 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
+
+static int
+cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb,
+                    u32 count)
+{
+        struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head;
+        struct iscsi_r2t_info *r2t;
+        struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+        struct iscsi_conn *conn = task->conn;
+        struct iscsi_session *session = conn->session;
+        struct iscsi_tcp_task *tcp_task = task->dd_data;
+        u32 burst_size = 0, r2t_dlength = 0, dlength;
+        u32 max_pdu_len = tdata->max_xmit_dlength;
+        u32 segment_offset = 0;
+        u32 num_pdu;
+
+        if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
+                return 0;
+
+        memset(iso_info, 0, sizeof(struct cxgbi_iso_info));
+
+        if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) {
+                iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE;
+                burst_size = count;
+        }
+
+        dlength = ntoh24(task->hdr->dlength);
+        dlength = min(dlength, max_pdu_len);
+        hton24(task->hdr->dlength, dlength);
+
+        num_pdu = (count + max_pdu_len - 1) / max_pdu_len;
+
+        if (iscsi_task_has_unsol_data(task))
+                r2t = &task->unsol_r2t;
+        else
+                r2t = tcp_task->r2t;
+
+        if (r2t) {
+                log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+                          "count %u, tdata->count %u, num_pdu %u,"
+                          "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n",
+                          count, tdata->count, num_pdu, task->hdr_len,
+                          r2t->data_length, r2t->sent);
+
+                r2t_dlength = r2t->data_length - r2t->sent;
+                segment_offset = r2t->sent;
+                r2t->datasn += num_pdu - 1;
+        }
+
+        if (!r2t || !r2t->sent)
+                iso_info->flags |= CXGBI_ISO_INFO_FSLICE;
+
+        if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL)
+                iso_info->flags |= CXGBI_ISO_INFO_LSLICE;
+
+        task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+
+        iso_info->op = task->hdr->opcode;
+        iso_info->ahs = task->hdr->hlength;
+        iso_info->num_pdu = num_pdu;
+        iso_info->mpdu = max_pdu_len;
+        iso_info->burst_size = (burst_size + r2t_dlength) >> 2;
+        iso_info->len = count + task->hdr_len;
+        iso_info->segment_offset = segment_offset;
+
+        cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len;
+        return 0;
+}
 
 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
 {
@@ -1952,133 +2134,260 @@
                         submode |= 1;
                 if (dcrc)
                         submode |= 2;
-                cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+                cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
         } else
-                cxgbi_skcb_ulp_mode(skb) = 0;
+                cxgbi_skcb_tx_ulp_mode(skb) = 0;
 }
+
+static struct page *rsvd_page;
 
 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
                         unsigned int count)
 {
         struct iscsi_conn *conn = task->conn;
+        struct iscsi_tcp_task *tcp_task = task->dd_data;
         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
-        struct sk_buff *skb = tdata->skb;
-        unsigned int datalen = count;
-        int i, padlen = iscsi_padding(count);
+        struct sk_buff *skb;
+        struct scsi_cmnd *sc = task->sc;
+        u32 expected_count, expected_offset;
+        u32 datalen = count, dlimit = 0;
+        u32 i, padlen = iscsi_padding(count);
         struct page *pg;
+        int err;
+
+        if (!tcp_task || (tcp_task->dd_data != tdata)) {
+                pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
+                       task, task->sc, tcp_task,
+                       tcp_task ? tcp_task->dd_data : NULL, tdata);
+                return -EINVAL;
+        }
+        skb = tdata->skb;
 
         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-                "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
-                task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
-                ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);
+                  "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
+                  task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
+                  be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count);
 
         skb_put(skb, task->hdr_len);
         tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
-        if (!count)
-                return 0;
-
-        if (task->sc) {
-                struct scsi_data_buffer *sdb = scsi_out(task->sc);
-                struct scatterlist *sg = NULL;
-                int err;
-
-                tdata->offset = offset;
+        if (!count) {
                 tdata->count = count;
-                err = sgl_seek_offset(
-                        sdb->table.sgl, sdb->table.nents,
-                        tdata->offset, &tdata->sgoffset, &sg);
-                if (err < 0) {
-                        pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
-                                sdb->table.nents, tdata->offset, sdb->length);
-                        return err;
-                }
-                err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
-                                        tdata->frags, MAX_PDU_FRAGS);
-                if (err < 0) {
-                        pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
-                                sdb->table.nents, tdata->offset, tdata->count);
-                        return err;
-                }
-                tdata->nr_frags = err;
+                tdata->offset = offset;
+                tdata->nr_frags = 0;
+                tdata->total_offset = 0;
+                tdata->total_count = 0;
+                if (tdata->max_xmit_dlength)
+                        conn->max_xmit_dlength = tdata->max_xmit_dlength;
+                cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
+                return 0;
+        }
 
-                if (tdata->nr_frags > MAX_SKB_FRAGS ||
-                    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
+        log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+                  "data->total_count %u, tdata->total_offset %u\n",
+                  tdata->total_count, tdata->total_offset);
+
+        expected_count = tdata->total_count;
+        expected_offset = tdata->total_offset;
+
+        if ((count != expected_count) ||
+            (offset != expected_offset)) {
+                err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit);
+                if (err < 0) {
+                        pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p "
+                               "dlimit %u, sgl err %d.\n", task, task->sc,
+                               tcp_task, tcp_task ? tcp_task->dd_data : NULL,
+                               tdata, dlimit, err);
+                        return err;
+                }
+        }
+
+        /* Restore original value of conn->max_xmit_dlength because
+         * it can get updated to ISO data size.
+         */
+        conn->max_xmit_dlength = tdata->max_xmit_dlength;
+
+        if (sc) {
+                struct page_frag *frag = tdata->frags;
+
+                if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
+                    (tdata->nr_frags > MAX_SKB_FRAGS) ||
+                    (padlen && (tdata->nr_frags ==
+                                MAX_SKB_FRAGS))) {
                         char *dst = skb->data + task->hdr_len;
-                        struct page_frag *frag = tdata->frags;
 
                         /* data fits in the skb's headroom */
                         for (i = 0; i < tdata->nr_frags; i++, frag++) {
                                 char *src = kmap_atomic(frag->page);
 
-                                memcpy(dst, src+frag->offset, frag->size);
+                                memcpy(dst, src + frag->offset, frag->size);
                                 dst += frag->size;
                                 kunmap_atomic(src);
                         }
+
                         if (padlen) {
                                 memset(dst, 0, padlen);
                                 padlen = 0;
                         }
                         skb_put(skb, count + padlen);
                 } else {
-                        /* data fit into frag_list */
-                        for (i = 0; i < tdata->nr_frags; i++) {
-                                __skb_fill_page_desc(skb, i,
-                                                tdata->frags[i].page,
-                                                tdata->frags[i].offset,
-                                                tdata->frags[i].size);
-                                skb_frag_ref(skb, i);
+                        for (i = 0; i < tdata->nr_frags; i++, frag++) {
+                                get_page(frag->page);
+                                skb_fill_page_desc(skb, i, frag->page,
+                                                   frag->offset, frag->size);
                         }
-                        skb_shinfo(skb)->nr_frags = tdata->nr_frags;
+
                         skb->len += count;
                         skb->data_len += count;
                         skb->truesize += count;
                 }
-
         } else {
-                pg = virt_to_page(task->data);
-
+                pg = virt_to_head_page(task->data);
                 get_page(pg);
-                skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
-                                count);
+                skb_fill_page_desc(skb, 0, pg,
+                                   task->data - (char *)page_address(pg),
+                                   count);
                 skb->len += count;
                 skb->data_len += count;
                 skb->truesize += count;
         }
 
         if (padlen) {
-                i = skb_shinfo(skb)->nr_frags;
+                get_page(rsvd_page);
                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                virt_to_page(padding), offset_in_page(padding),
-                                padlen);
+                                   rsvd_page, 0, padlen);
 
                 skb->data_len += padlen;
                 skb->truesize += padlen;
                 skb->len += padlen;
         }
 
+        if (likely(count > tdata->max_xmit_dlength))
+                cxgbi_prep_iso_info(task, skb, count);
+        else
+                cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
+
+static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+        struct cxgbi_device *cdev = csk->cdev;
+        struct cxgbi_iso_info *iso_cpl;
+        u32 frags = skb_shinfo(skb)->nr_frags;
+        u32 extra_len, num_pdu, hdr_len;
+        u32 iso_tx_rsvd = 0;
+
+        if (csk->state != CTP_ESTABLISHED) {
+                log_debug(1 << CXGBI_DBG_PDU_TX,
+                          "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
+                          csk, csk->state, csk->flags, csk->tid);
+                return -EPIPE;
+        }
+
+        if (csk->err) {
+                log_debug(1 << CXGBI_DBG_PDU_TX,
+                          "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
+                          csk, csk->state, csk->flags, csk->tid, csk->err);
+                return -EPIPE;
+        }
+
+        if ((cdev->flags & CXGBI_FLAG_DEV_T3) &&
+            before((csk->snd_win + csk->snd_una), csk->write_seq)) {
+                log_debug(1 << CXGBI_DBG_PDU_TX,
+                          "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
+                          csk, csk->state, csk->flags, csk->tid, csk->write_seq,
+                          csk->snd_una, csk->snd_win);
+                return -ENOBUFS;
+        }
+
+        if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
+                iso_tx_rsvd = cdev->skb_iso_txhdr;
+
+        if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) {
+                pr_err("csk 0x%p, skb head %u < %u.\n",
+                       csk, skb_headroom(skb), cdev->skb_tx_rsvd);
+                return -EINVAL;
+        }
+
+        if (skb->len != skb->data_len)
+                frags++;
+
+        if (frags >= SKB_WR_LIST_SIZE) {
+                pr_err("csk 0x%p, frags %u, %u,%u >%lu.\n",
+                       csk, skb_shinfo(skb)->nr_frags, skb->len,
+                       skb->data_len, SKB_WR_LIST_SIZE);
+                return -EINVAL;
+        }
+
+        cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
+        skb_reset_transport_header(skb);
+        cxgbi_sock_skb_entail(csk, skb);
+
+        extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
+
+        if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) {
+                iso_cpl = (struct cxgbi_iso_info *)skb->head;
+                num_pdu = iso_cpl->num_pdu;
+                hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
+                extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) *
+                             num_pdu) + (hdr_len * (num_pdu - 1));
+        }
+
+        csk->write_seq += (skb->len + extra_len);
+
+        return 0;
+}
+
+static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb)
+{
+        struct cxgbi_device *cdev = csk->cdev;
+        int len = skb->len;
+        int err;
+
+        spin_lock_bh(&csk->lock);
+        err = cxgbi_sock_tx_queue_up(csk, skb);
+        if (err < 0) {
+                spin_unlock_bh(&csk->lock);
+                return err;
+        }
+
+        if (likely(skb_queue_len(&csk->write_queue)))
+                cdev->csk_push_tx_frames(csk, 0);
+        spin_unlock_bh(&csk->lock);
+        return len;
+}
 
 int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 {
         struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
         struct cxgbi_conn *cconn = tcp_conn->dd_data;
+        struct iscsi_tcp_task *tcp_task = task->dd_data;
         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
         struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
-        struct sk_buff *skb = tdata->skb;
+        struct sk_buff *skb;
         struct cxgbi_sock *csk = NULL;
-        unsigned int datalen;
+        u32 pdulen = 0;
+        u32 datalen;
         int err;
 
+        if (!tcp_task || (tcp_task->dd_data != tdata)) {
+                pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
+                       task, task->sc, tcp_task,
+                       tcp_task ? tcp_task->dd_data : NULL, tdata);
+                return -EINVAL;
+        }
+
+        skb = tdata->skb;
         if (!skb) {
                 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-                        "task 0x%p\n", task);
+                          "task 0x%p, skb NULL.\n", task);
                 return 0;
         }
 
         if (cconn && cconn->cep)
                 csk = cconn->cep->csk;
+
         if (!csk) {
                 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
                           "task 0x%p, csk gone.\n", task);
@@ -2102,13 +2411,12 @@
         if (!task->sc)
                 memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
 
-        err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
+        err = cxgbi_sock_send_skb(csk, skb);
         if (err > 0) {
-                int pdulen = err;
+                pdulen += err;
 
-                log_debug(1 << CXGBI_DBG_PDU_TX,
-                        "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
-                        task, task->sc, skb, skb->len, skb->data_len, err);
+                log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n",
+                          task, task->sc, err);
 
                 if (task->conn->hdrdgst_en)
                         pdulen += ISCSI_DIGEST_SIZE;
@@ -2117,24 +2425,42 @@
                         pdulen += ISCSI_DIGEST_SIZE;
 
                 task->conn->txdata_octets += pdulen;
+
+                if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) {
+                        if (time_after(jiffies, csk->prev_iso_ts + HZ)) {
+                                csk->disable_iso = false;
+                                csk->prev_iso_ts = 0;
+                                log_debug(1 << CXGBI_DBG_PDU_TX,
+                                          "enable iso: csk 0x%p\n", csk);
+                        }
+                }
+
                 return 0;
         }
 
         if (err == -EAGAIN || err == -ENOBUFS) {
                 log_debug(1 << CXGBI_DBG_PDU_TX,
-                        "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
-                        task, skb, skb->len, skb->data_len, err);
+                          "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
+                          task, skb, skb->len, skb->data_len, err);
                 /* reset skb to send when we are called again */
                 tdata->skb = skb;
+
+                if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
+                    (csk->no_tx_credits++ >= 2)) {
+                        csk->disable_iso = true;
+                        csk->prev_iso_ts = jiffies;
+                        log_debug(1 << CXGBI_DBG_PDU_TX,
+                                  "disable iso:csk 0x%p, ts:%lu\n",
+                                  csk, csk->prev_iso_ts);
+                }
+
                 return err;
         }
 
         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-                "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
-                task->itt, skb, skb->len, skb->data_len, err);
-
+                  "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
+                  task->itt, skb, skb->len, skb->data_len, err);
         __kfree_skb(skb);
-
         iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
         iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
         return err;
@@ -2146,7 +2472,7 @@
         struct iscsi_tcp_task *tcp_task = task->dd_data;
         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
 
-        if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) {
+        if (!tcp_task || (tcp_task->dd_data != tdata)) {
                 pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
                         task, task->sc, tcp_task,
                         tcp_task ? tcp_task->dd_data : NULL, tdata);
@@ -2286,40 +2612,11 @@
 }
 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
 
-static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
-{
-        int len;
-
-        cxgbi_sock_get(csk);
-        len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
-        cxgbi_sock_put(csk);
-
-        return len;
-}
-
-static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
-{
-        int len;
-
-        cxgbi_sock_get(csk);
-        if (csk->csk_family == AF_INET)
-                len = sprintf(buf, "%pI4",
-                              &csk->daddr.sin_addr.s_addr);
-        else
-                len = sprintf(buf, "%pI6",
-                              &csk->daddr6.sin6_addr);
-
-        cxgbi_sock_put(csk);
-
-        return len;
-}
-
 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
                        char *buf)
 {
         struct cxgbi_endpoint *cep = ep->dd_data;
         struct cxgbi_sock *csk;
-        int len;
 
         log_debug(1 << CXGBI_DBG_ISCSI,
                   "cls_conn 0x%p, param %d.\n", ep, param);
@@ -2337,9 +2634,9 @@
                 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
                                                  &csk->daddr, param, buf);
         default:
-                return -ENOSYS;
+                break;
         }
-        return len;
+        return -ENOSYS;
 }
 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
 
@@ -2393,11 +2690,13 @@
         err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
                                              ppm->tformat.pgsz_idx_dflt);
         if (err < 0)
-                return err;
+                goto put_ep;
 
         err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
-        if (err)
-                return -EINVAL;
+        if (err) {
+                err = -EINVAL;
+                goto put_ep;
+        }
 
         /* calculate the tag idx bits needed for this conn based on cmds_max */
         cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
@@ -2418,7 +2717,9 @@
         /* init recv engine */
         iscsi_tcp_hdr_recv_prep(tcp_conn);
 
-        return 0;
+put_ep:
+        iscsi_put_endpoint(ep);
+        return err;
 }
 EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
 
@@ -2572,13 +2873,9 @@
                         pr_info("shost 0x%p, priv NULL.\n", shost);
                         goto err_out;
                 }
-
-                rtnl_lock();
-                if (!vlan_uses_dev(hba->ndev))
-                        ifindex = hba->ndev->ifindex;
-                rtnl_unlock();
         }
 
+check_route:
         if (dst_addr->sa_family == AF_INET) {
                 csk = cxgbi_check_route(dst_addr, ifindex);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2599,6 +2896,13 @@
         if (!hba)
                 hba = csk->cdev->hbas[csk->port_id];
         else if (hba != csk->cdev->hbas[csk->port_id]) {
+                if (ifindex != hba->ndev->ifindex) {
+                        cxgbi_sock_put(csk);
+                        cxgbi_sock_closed(csk);
+                        ifindex = hba->ndev->ifindex;
+                        goto check_route;
+                }
+
                 pr_info("Could not connect through requested host %u"
                         "hba 0x%p != 0x%p (%u).\n",
                         shost->host_no, hba,
27743078 {
27753079 pr_info("%s", version);
27763080
2777
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
3081
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
27783082 sizeof(struct cxgbi_skb_cb));
3083
+ rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3084
+ if (!rsvd_page)
3085
+ return -ENOMEM;
3086
+
27793087 return 0;
27803088 }
27813089
27823090 static void __exit libcxgbi_exit_module(void)
27833091 {
27843092 cxgbi_device_unregister_all(0xFF);
3093
+ put_page(rsvd_page);
27853094 return;
27863095 }
27873096
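
A note on the padding change visible in the cxgbi_conn_init_pdu() and module init/exit hunks above: instead of pointing pad-byte fragments at a static padding[4] buffer, the patch allocates a single zeroed rsvd_page at module load and attaches it (with its own page reference) whenever a PDU needs pad bytes. A minimal sketch of that pattern, using the hypothetical names demo_rsvd_page and demo_pad_skb() (not from the patch itself):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

static struct page *demo_rsvd_page; /* alloc_page(GFP_KERNEL | __GFP_ZERO) at init */

static void demo_pad_skb(struct sk_buff *skb, unsigned int padlen)
{
        if (!padlen)
                return;
        get_page(demo_rsvd_page);   /* the fragment holds its own reference */
        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                           demo_rsvd_page, 0, padlen);
        skb->len += padlen;
        skb->data_len += padlen;
        skb->truesize += padlen;
}

Because every pad fragment references the same zeroed page, the pad bytes are always zero and the page is only released with put_page() at module exit, matching the libcxgbi_exit_module() hunk above.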