forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/drivers/block/xen-blkfront.c
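(No commit message was recorded. From the content, this appears to backport the upstream xen-blkfront hardening work for untrusted backends: bounce buffering of I/O data, zeroed shared pages, and atomic grant revocation via gnttab_try_end_foreign_access(), together with the flexible-array ring_info rework and assorted block-layer API updates.)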
@@ -151,11 +151,12 @@
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE(info)     \
         __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
-
-#define BLK_MAX_RING_SIZE       \
-        __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
 
 /*
  * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
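The new `trusted` parameter lets the guest administrator declare the backend untrusted, which forces all I/O data through bounce pages (the `info->bounce` flag introduced below). `BLK_MAX_RING_SIZE` disappears because the shadow array it sized becomes a flexible array allocated per device in the next hunk. A usage sketch for the parameter (paths follow from the `module_param_named()` line above):

        /*
         * Usage sketch: the parameter is mode 0644, so it can be set at boot
         *     xen_blkfront.trusted=0
         * or at runtime, affecting devices (re)connected afterwards:
         *     echo 0 > /sys/module/xen_blkfront/parameters/trusted
         * Either marks every backend untrusted and enables data bouncing.
         */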
@@ -180,12 +181,12 @@
         unsigned int evtchn, irq;
         struct work_struct work;
         struct gnttab_free_callback callback;
-        struct blk_shadow shadow[BLK_MAX_RING_SIZE];
         struct list_head indirect_pages;
         struct list_head grants;
         unsigned int persistent_gnts_c;
         unsigned long shadow_free;
         struct blkfront_info *dev_info;
+        struct blk_shadow shadow[];
 };
 
 /*
@@ -210,7 +211,11 @@
         unsigned int feature_fua:1;
         unsigned int feature_discard:1;
         unsigned int feature_secdiscard:1;
+        /* Connect-time cached feature_persistent parameter */
+        unsigned int feature_persistent_parm:1;
+        /* Persistent grants feature negotiation result */
         unsigned int feature_persistent:1;
+        unsigned int bounce:1;
         unsigned int discard_granularity;
         unsigned int discard_alignment;
         /* Number of 4KB segments handled */
@@ -219,6 +224,7 @@
         struct blk_mq_tag_set tag_set;
         struct blkfront_ring_info *rinfo;
         unsigned int nr_rings;
+        unsigned int rinfo_size;
         /* Save uncomplete reqs and bios for migration. */
         struct list_head requests;
         struct bio_list bio_list;
@@ -265,6 +271,18 @@
 static void blkfront_gather_backend_features(struct blkfront_info *info);
 static int negotiate_mq(struct blkfront_info *info);
 
+#define for_each_rinfo(info, ptr, idx)          \
+        for ((ptr) = (info)->rinfo, (idx) = 0;  \
+             (idx) < (info)->nr_rings;          \
+             (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
+
+static inline struct blkfront_ring_info *
+get_rinfo(const struct blkfront_info *info, unsigned int i)
+{
+        BUG_ON(i >= info->nr_rings);
+        return (void *)info->rinfo + i * info->rinfo_size;
+}
+
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
         unsigned long free = rinfo->shadow_free;
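for_each_rinfo() and get_rinfo() replace plain array indexing because struct blkfront_ring_info now ends in a flexible shadow[] array: each element's size is only known at runtime (rinfo_size), so the walk advances a byte pointer by that stride. A minimal standalone illustration of the same pattern (hypothetical names, userspace C):

        #include <stdio.h>
        #include <stdlib.h>

        struct elem {
                unsigned int id;
                int shadow[];   /* flexible array member; length chosen at runtime */
        };

        int main(void)
        {
                unsigned int nr = 3, ring_size = 8, i;
                size_t elem_size = sizeof(struct elem) + ring_size * sizeof(int);
                struct elem *base = calloc(nr, elem_size), *p;

                if (!base)
                        return 1;
                /* Stride by elem_size, not sizeof(struct elem): sizeof() does
                 * not include the flexible array, exactly as in for_each_rinfo(). */
                for (i = 0, p = base; i < nr;
                     i++, p = (struct elem *)((char *)p + elem_size))
                        p->id = i;
                /* get_rinfo() equivalent: element i lives at base + i * elem_size */
                printf("id of element 1: %u\n",
                       ((struct elem *)((char *)base + elem_size))->id);
                free(base);
                return 0;
        }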
@@ -300,8 +318,8 @@
                 if (!gnt_list_entry)
                         goto out_of_memory;
 
-                if (info->feature_persistent) {
-                        granted_page = alloc_page(GFP_NOIO);
+                if (info->bounce) {
+                        granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
                         if (!granted_page) {
                                 kfree(gnt_list_entry);
                                 goto out_of_memory;
@@ -320,7 +338,7 @@
         list_for_each_entry_safe(gnt_list_entry, n,
                                  &rinfo->grants, node) {
                 list_del(&gnt_list_entry->node);
-                if (info->feature_persistent)
+                if (info->bounce)
                         __free_page(gnt_list_entry->page);
                 kfree(gnt_list_entry);
                 i--;
@@ -366,7 +384,7 @@
         /* Assign a gref to this page */
         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
         BUG_ON(gnt_list_entry->gref == -ENOSPC);
-        if (info->feature_persistent)
+        if (info->bounce)
                 grant_foreign_access(gnt_list_entry, info);
         else {
                 /* Grant access to the GFN passed by the caller */
@@ -390,7 +408,7 @@
         /* Assign a gref to this page */
         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
         BUG_ON(gnt_list_entry->gref == -ENOSPC);
-        if (!info->feature_persistent) {
+        if (!info->bounce) {
                 struct page *indirect_page;
 
                 /* Fetch a pre-allocated page to use for indirect grefs */
@@ -705,7 +723,7 @@
                 .grant_idx = 0,
                 .segments = NULL,
                 .rinfo = rinfo,
-                .need_copy = rq_data_dir(req) && info->feature_persistent,
+                .need_copy = rq_data_dir(req) && info->bounce,
         };
 
         /*
@@ -898,8 +916,7 @@
         struct blkfront_info *info = hctx->queue->queuedata;
         struct blkfront_ring_info *rinfo = NULL;
 
-        BUG_ON(info->nr_rings <= qid);
-        rinfo = &info->rinfo[qid];
+        rinfo = get_rinfo(info, qid);
         blk_mq_start_request(qd->rq);
         spin_lock_irqsave(&rinfo->ring_lock, flags);
         if (RING_FULL(&rinfo->ring))
@@ -990,7 +1007,7 @@
         } else
                 info->tag_set.queue_depth = BLK_RING_SIZE(info);
         info->tag_set.numa_node = NUMA_NO_NODE;
-        info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+        info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
         info->tag_set.cmd_size = sizeof(struct blkif_req);
         info->tag_set.driver_data = info;
 
@@ -1026,11 +1043,12 @@
 {
         blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
                               info->feature_fua ? true : false);
-        pr_info("blkfront: %s: %s %s %s %s %s\n",
+        pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
                 info->gd->disk_name, flush_info(info),
                 "persistent grants:", info->feature_persistent ?
                 "enabled;" : "disabled;", "indirect descriptors:",
-                info->max_indirect_segments ? "enabled;" : "disabled;");
+                info->max_indirect_segments ? "enabled;" : "disabled;",
+                "bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1197,6 +1215,7 @@
 static void xlvbd_release_gendisk(struct blkfront_info *info)
 {
         unsigned int minor, nr_minors, i;
+        struct blkfront_ring_info *rinfo;
 
         if (info->rq == NULL)
                 return;
@@ -1204,9 +1223,7 @@
         /* No more blkif_request(). */
         blk_mq_stop_hw_queues(info->rq);
 
-        for (i = 0; i < info->nr_rings; i++) {
-                struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+        for_each_rinfo(info, rinfo, i) {
                 /* No more gnttab callback work. */
                 gnttab_cancel_free_callback(&rinfo->callback);
 
@@ -1265,7 +1282,7 @@
         if (!list_empty(&rinfo->indirect_pages)) {
                 struct page *indirect_page, *n;
 
-                BUG_ON(info->feature_persistent);
+                BUG_ON(info->bounce);
                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
                         list_del(&indirect_page->lru);
                         __free_page(indirect_page);
@@ -1282,7 +1299,7 @@
                                                   0, 0UL);
                         rinfo->persistent_gnts_c--;
                 }
-                if (info->feature_persistent)
+                if (info->bounce)
                         __free_page(persistent_gnt->page);
                 kfree(persistent_gnt);
         }
@@ -1303,7 +1320,7 @@
                 for (j = 0; j < segs; j++) {
                         persistent_gnt = rinfo->shadow[i].grants_used[j];
                         gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-                        if (info->feature_persistent)
+                        if (info->bounce)
                                 __free_page(persistent_gnt->page);
                         kfree(persistent_gnt);
                 }
@@ -1344,7 +1361,8 @@
                         rinfo->ring_ref[i] = GRANT_INVALID_REF;
                 }
         }
-        free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
+        free_pages_exact(rinfo->ring.sring,
+                         info->nr_ring_pages * XEN_PAGE_SIZE);
         rinfo->ring.sring = NULL;
 
         if (rinfo->irq)
@@ -1355,6 +1373,7 @@
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
         unsigned int i;
+        struct blkfront_ring_info *rinfo;
 
         /* Prevent new requests being issued until we fix things up. */
         info->connected = suspend ?
@@ -1363,8 +1382,8 @@
         if (info->rq)
                 blk_mq_stop_hw_queues(info->rq);
 
-        for (i = 0; i < info->nr_rings; i++)
-                blkif_free_ring(&info->rinfo[i]);
+        for_each_rinfo(info, rinfo, i)
+                blkif_free_ring(rinfo);
 
         kvfree(info->rinfo);
         info->rinfo = NULL;
@@ -1406,7 +1425,6 @@
         case BLKIF_RSP_EOPNOTSUPP:
                 return REQ_EOPNOTSUPP;
         case BLKIF_RSP_ERROR:
-                /* Fallthrough. */
         default:
                 return REQ_ERROR;
         }
@@ -1428,9 +1446,15 @@
         return BLKIF_RSP_OKAY;
 }
 
-static bool blkif_completion(unsigned long *id,
-                             struct blkfront_ring_info *rinfo,
-                             struct blkif_response *bret)
+/*
+ * Return values:
+ *  1 response processed.
+ *  0 missing further responses.
+ * -1 error while processing.
+ */
+static int blkif_completion(unsigned long *id,
+                            struct blkfront_ring_info *rinfo,
+                            struct blkif_response *bret)
 {
         int i = 0;
         struct scatterlist *sg;
14531477
14541478 /* Wait the second response if not yet here. */
14551479 if (s2->status < REQ_DONE)
1456
- return false;
1480
+ return 0;
14571481
14581482 bret->status = blkif_get_final_status(s->status,
14591483 s2->status);
....@@ -1486,7 +1510,7 @@
14861510 data.s = s;
14871511 num_sg = s->num_sg;
14881512
1489
- if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1513
+ if (bret->operation == BLKIF_OP_READ && info->bounce) {
14901514 for_each_sg(s->sg, sg, num_sg, i) {
14911515 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
14921516
....@@ -1504,47 +1528,48 @@
15041528 }
15051529 /* Add the persistent grant into the list of free grants */
15061530 for (i = 0; i < num_grant; i++) {
1507
- if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
1531
+ if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
15081532 /*
15091533 * If the grant is still mapped by the backend (the
15101534 * backend has chosen to make this grant persistent)
15111535 * we add it at the head of the list, so it will be
15121536 * reused first.
15131537 */
1514
- if (!info->feature_persistent)
1515
- pr_alert_ratelimited("backed has not unmapped grant: %u\n",
1516
- s->grants_used[i]->gref);
1538
+ if (!info->feature_persistent) {
1539
+ pr_alert("backed has not unmapped grant: %u\n",
1540
+ s->grants_used[i]->gref);
1541
+ return -1;
1542
+ }
15171543 list_add(&s->grants_used[i]->node, &rinfo->grants);
15181544 rinfo->persistent_gnts_c++;
15191545 } else {
15201546 /*
1521
- * If the grant is not mapped by the backend we end the
1522
- * foreign access and add it to the tail of the list,
1523
- * so it will not be picked again unless we run out of
1524
- * persistent grants.
1547
+ * If the grant is not mapped by the backend we add it
1548
+ * to the tail of the list, so it will not be picked
1549
+ * again unless we run out of persistent grants.
15251550 */
1526
- gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
15271551 s->grants_used[i]->gref = GRANT_INVALID_REF;
15281552 list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
15291553 }
15301554 }
15311555 if (s->req.operation == BLKIF_OP_INDIRECT) {
15321556 for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1533
- if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
1534
- if (!info->feature_persistent)
1535
- pr_alert_ratelimited("backed has not unmapped grant: %u\n",
1536
- s->indirect_grants[i]->gref);
1557
+ if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
1558
+ if (!info->feature_persistent) {
1559
+ pr_alert("backed has not unmapped grant: %u\n",
1560
+ s->indirect_grants[i]->gref);
1561
+ return -1;
1562
+ }
15371563 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
15381564 rinfo->persistent_gnts_c++;
15391565 } else {
15401566 struct page *indirect_page;
15411567
1542
- gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
15431568 /*
15441569 * Add the used indirect page back to the list of
15451570 * available pages for indirect grefs.
15461571 */
1547
- if (!info->feature_persistent) {
1572
+ if (!info->bounce) {
15481573 indirect_page = s->indirect_grants[i]->page;
15491574 list_add(&indirect_page->lru, &rinfo->indirect_pages);
15501575 }
@@ -1554,7 +1579,7 @@
                 }
         }
 
-        return true;
+        return 1;
 }
 
 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1620,12 +1645,17 @@
                 }
 
                 if (bret.operation != BLKIF_OP_DISCARD) {
+                        int ret;
+
                         /*
                          * We may need to wait for an extra response if the
                          * I/O request is split in 2
                          */
-                        if (!blkif_completion(&id, rinfo, &bret))
+                        ret = blkif_completion(&id, rinfo, &bret);
+                        if (!ret)
                                 continue;
+                        if (unlikely(ret < 0))
+                                goto err;
                 }
 
                 if (add_id_to_freelist(rinfo, id)) {
@@ -1673,7 +1703,7 @@
                                 info->feature_flush = 0;
                                 xlvbd_flush(info);
                         }
-                        /* fall through */
+                        fallthrough;
                 case BLKIF_OP_READ:
                 case BLKIF_OP_WRITE:
                         if (unlikely(bret.status != BLKIF_RSP_OKAY))
@@ -1686,7 +1716,8 @@
                         BUG();
                 }
 
-                blk_mq_complete_request(req);
+                if (likely(!blk_should_fake_timeout(req->q)))
+                        blk_mq_complete_request(req);
         }
 
         rinfo->ring.rsp_cons = i;
@@ -1731,8 +1762,7 @@
         for (i = 0; i < info->nr_ring_pages; i++)
                 rinfo->ring_ref[i] = GRANT_INVALID_REF;
 
-        sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
-                                                       get_order(ring_size));
+        sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
         if (!sring) {
                 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                 return -ENOMEM;
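setup_blkring() moves from __get_free_pages()/get_order() to alloc_pages_exact() with __GFP_ZERO. Zeroing matters because the ring pages are granted to a possibly untrusted backend, so stale kernel data must not leak into them; alloc_pages_exact() additionally avoids get_order()'s power-of-two rounding. A small userspace sketch of that rounding (get_order() re-implemented here purely for illustration):

        #include <stdio.h>

        #define PAGE_SHIFT 12
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        /* minimal re-implementation of the kernel's get_order() */
        static int get_order(unsigned long size)
        {
                int order = 0;

                size = (size - 1) >> PAGE_SHIFT;
                while (size) {
                        order++;
                        size >>= 1;
                }
                return order;
        }

        int main(void)
        {
                unsigned long ring_size = 3 * PAGE_SIZE;

                /* an order-2 block is 4 pages for a 3-page ring; the exact
                 * API allocates and frees precisely the 3 pages needed */
                printf("pages allocated: %lu, needed: %lu\n",
                       1UL << get_order(ring_size), ring_size / PAGE_SIZE);
                return 0;
        }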
@@ -1742,7 +1772,7 @@
 
         err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
         if (err < 0) {
-                free_pages((unsigned long)sring, get_order(ring_size));
+                free_pages_exact(sring, ring_size);
                 rinfo->ring.sring = NULL;
                 goto fail;
         }
@@ -1822,6 +1852,12 @@
         kfree(info);
 }
 
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+                "Enables the persistent grants feature");
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_blkback(struct xenbus_device *dev,
                            struct blkfront_info *info)
@@ -1831,9 +1867,14 @@
         int err;
         unsigned int i, max_page_order;
         unsigned int ring_page_order;
+        struct blkfront_ring_info *rinfo;
 
         if (!info)
                 return -ENODEV;
+
+        /* Check if backend is trusted. */
+        info->bounce = !xen_blkif_trusted ||
+                       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
 
         max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
                                               "max-ring-page-order", 0);
@@ -1844,9 +1885,7 @@
         if (err)
                 goto destroy_blkring;
 
-        for (i = 0; i < info->nr_rings; i++) {
-                struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+        for_each_rinfo(info, rinfo, i) {
                 /* Create shared ring, alloc event channel. */
                 err = setup_blkring(dev, rinfo);
                 if (err)
@@ -1871,7 +1910,7 @@
 
         /* We already got the number of queues/rings in _probe */
         if (info->nr_rings == 1) {
-                err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
+                err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
                 if (err)
                         goto destroy_blkring;
         } else {
@@ -1893,10 +1932,10 @@
                         goto abort_transaction;
                 }
 
-                for (i = 0; i < info->nr_rings; i++) {
+                for_each_rinfo(info, rinfo, i) {
                         memset(path, 0, pathsize);
                         snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
-                        err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
+                        err = write_per_ring_nodes(xbt, rinfo, path);
                         if (err) {
                                 kfree(path);
                                 goto destroy_blkring;
@@ -1910,8 +1949,9 @@
                 message = "writing protocol";
                 goto abort_transaction;
         }
-        err = xenbus_printf(xbt, dev->nodename,
-                            "feature-persistent", "%u", 1);
+        info->feature_persistent_parm = feature_persistent;
+        err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+                            info->feature_persistent_parm);
         if (err)
                 dev_warn(&dev->dev,
                          "writing persistent grants feature to xenbus");
@@ -1924,9 +1964,8 @@
                 goto destroy_blkring;
         }
 
-        for (i = 0; i < info->nr_rings; i++) {
+        for_each_rinfo(info, rinfo, i) {
                 unsigned int j;
-                struct blkfront_ring_info *rinfo = &info->rinfo[i];
 
                 for (j = 0; j < BLK_RING_SIZE(info); j++)
                         rinfo->shadow[j].req.u.rw.id = j + 1;
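The j + 1 initialisation threads a free list through the shadow array itself: each free slot's id field holds the index of the next free slot, which is what get_id_from_freelist()/add_id_to_freelist() earlier in the file walk. A standalone sketch of the idiom (hypothetical names):

        #include <assert.h>

        #define RING_LEN 8

        static unsigned long next_free[RING_LEN]; /* stands in for shadow[j].req.u.rw.id */
        static unsigned long free_head;           /* stands in for shadow_free */

        static void freelist_init(void)
        {
                unsigned long j;

                for (j = 0; j < RING_LEN; j++)
                        next_free[j] = j + 1;   /* slot j points at slot j + 1 */
                free_head = 0;
        }

        static unsigned long get_id(void)
        {
                unsigned long id = free_head;

                assert(id < RING_LEN);          /* list exhausted otherwise */
                free_head = next_free[id];
                return id;                      /* O(1), no extra bookkeeping */
        }

        static void put_id(unsigned long id)
        {
                next_free[id] = free_head;      /* push released slot on front */
                free_head = id;
        }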
@@ -1956,6 +1995,7 @@
 {
         unsigned int backend_max_queues;
         unsigned int i;
+        struct blkfront_ring_info *rinfo;
 
         BUG_ON(info->nr_rings);
 
@@ -1967,19 +2007,16 @@
         if (!info->nr_rings)
                 info->nr_rings = 1;
 
-        info->rinfo = kvcalloc(info->nr_rings,
-                               sizeof(struct blkfront_ring_info),
-                               GFP_KERNEL);
+        info->rinfo_size = struct_size(info->rinfo, shadow,
+                                       BLK_RING_SIZE(info));
+        info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
         if (!info->rinfo) {
                 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
                 info->nr_rings = 0;
                 return -ENOMEM;
         }
 
-        for (i = 0; i < info->nr_rings; i++) {
-                struct blkfront_ring_info *rinfo;
-
-                rinfo = &info->rinfo[i];
+        for_each_rinfo(info, rinfo, i) {
                 INIT_LIST_HEAD(&rinfo->indirect_pages);
                 INIT_LIST_HEAD(&rinfo->grants);
                 rinfo->dev_info = info;
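struct_size() computes the size of one variable-length element: the header plus BLK_RING_SIZE(info) flexible-array entries, saturating on arithmetic overflow. kvcalloc() then lays nr_rings such elements end to end, which is precisely the stride for_each_rinfo() and get_rinfo() walk. A sketch of the equivalent open-coded computation (minus the kernel helper's overflow saturation):

        size_t rinfo_size = offsetof(struct blkfront_ring_info, shadow) +
                            BLK_RING_SIZE(info) * sizeof(struct blk_shadow);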
@@ -1988,6 +2025,7 @@
         }
         return 0;
 }
+
 /**
  * Entry point to this code when a new device is created. Allocate the basic
  * structures and the ring buffer for communication with the backend, and
@@ -2072,6 +2110,7 @@
         int rc;
         struct bio *bio;
         unsigned int segs;
+        struct blkfront_ring_info *rinfo;
 
         blkfront_gather_backend_features(info);
         /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2079,9 +2118,7 @@
         segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
         blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
-        for (r_index = 0; r_index < info->nr_rings; r_index++) {
-                struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
-
+        for_each_rinfo(info, rinfo, r_index) {
                 rc = blkfront_setup_indirect(rinfo);
                 if (rc)
                         return rc;
@@ -2091,10 +2128,7 @@
         /* Now safe for us to use the shared ring */
         info->connected = BLKIF_STATE_CONNECTED;
 
-        for (r_index = 0; r_index < info->nr_rings; r_index++) {
-                struct blkfront_ring_info *rinfo;
-
-                rinfo = &info->rinfo[r_index];
+        for_each_rinfo(info, rinfo, r_index) {
                 /* Kick any other new requests queued since we resumed */
                 kick_pending_request_queues(rinfo);
         }
@@ -2127,13 +2161,13 @@
         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
         int err = 0;
         unsigned int i, j;
+        struct blkfront_ring_info *rinfo;
 
         dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
         bio_list_init(&info->bio_list);
         INIT_LIST_HEAD(&info->requests);
-        for (i = 0; i < info->nr_rings; i++) {
-                struct blkfront_ring_info *rinfo = &info->rinfo[i];
+        for_each_rinfo(info, rinfo, i) {
                 struct bio_list merge_bio;
                 struct blk_shadow *shadow = rinfo->shadow;
 
@@ -2261,17 +2295,18 @@
         if (err)
                 goto out_of_memory;
 
-        if (!info->feature_persistent && info->max_indirect_segments) {
+        if (!info->bounce && info->max_indirect_segments) {
                 /*
-                 * We are using indirect descriptors but not persistent
-                 * grants, we need to allocate a set of pages that can be
+                 * We are using indirect descriptors but don't have a bounce
+                 * buffer, we need to allocate a set of pages that can be
                  * used for mapping indirect grefs
                  */
                 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
 
                 BUG_ON(!list_empty(&rinfo->indirect_pages));
                 for (i = 0; i < num; i++) {
-                        struct page *indirect_page = alloc_page(GFP_KERNEL);
+                        struct page *indirect_page = alloc_page(GFP_KERNEL |
+                                                                __GFP_ZERO);
                         if (!indirect_page)
                                 goto out_of_memory;
                         list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2360,9 +2395,12 @@
         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
                 blkfront_setup_discard(info);
 
-        info->feature_persistent =
-                !!xenbus_read_unsigned(info->xbdev->otherend,
-                                       "feature-persistent", 0);
+        if (info->feature_persistent_parm)
+                info->feature_persistent =
+                        !!xenbus_read_unsigned(info->xbdev->otherend,
+                                               "feature-persistent", 0);
+        if (info->feature_persistent)
+                info->bounce = true;
 
         indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
                                                  "feature-max-indirect-segments", 0);
@@ -2389,8 +2427,8 @@
         unsigned long sector_size;
         unsigned int physical_sector_size;
         unsigned int binfo;
-        char *envp[] = { "RESIZE=1", NULL };
         int err, i;
+        struct blkfront_ring_info *rinfo;
 
         switch (info->connected) {
         case BLKIF_STATE_CONNECTED:
@@ -2404,10 +2442,7 @@
                         return;
                 printk(KERN_INFO "Setting capacity to %Lu\n",
                        sectors);
-                set_capacity(info->gd, sectors);
-                revalidate_disk(info->gd);
-                kobject_uevent_env(&disk_to_dev(info->gd)->kobj,
-                                   KOBJ_CHANGE, envp);
+                set_capacity_revalidate_and_notify(info->gd, sectors, true);
 
                 return;
         case BLKIF_STATE_SUSPENDED:
@@ -2448,8 +2483,8 @@
                                          "physical-sector-size",
                                          sector_size);
         blkfront_gather_backend_features(info);
-        for (i = 0; i < info->nr_rings; i++) {
-                err = blkfront_setup_indirect(&info->rinfo[i]);
+        for_each_rinfo(info, rinfo, i) {
+                err = blkfront_setup_indirect(rinfo);
                 if (err) {
                         xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
                                          info->xbdev->otherend);
@@ -2470,10 +2505,10 @@
 
         /* Kick pending requests. */
         info->connected = BLKIF_STATE_CONNECTED;
-        for (i = 0; i < info->nr_rings; i++)
-                kick_pending_request_queues(&info->rinfo[i]);
+        for_each_rinfo(info, rinfo, i)
+                kick_pending_request_queues(rinfo);
 
-        device_add_disk(&info->xbdev->dev, info->gd);
+        device_add_disk(&info->xbdev->dev, info->gd, NULL);
 
         info->is_ready = 1;
         return;
@@ -2530,7 +2565,7 @@
         case XenbusStateClosed:
                 if (dev->state == XenbusStateClosed)
                         break;
-                /* fall through */
+                fallthrough;
         case XenbusStateClosing:
                 if (info)
                         blkfront_closing(info);
@@ -2684,6 +2719,7 @@
         .release = blkif_release,
         .getgeo = blkif_getgeo,
         .ioctl = blkif_ioctl,
+        .compat_ioctl = blkdev_compat_ptr_ioctl,
 };
 
 
@@ -2705,9 +2741,9 @@
 {
         unsigned int i;
         unsigned long flags;
+        struct blkfront_ring_info *rinfo;
 
-        for (i = 0; i < info->nr_rings; i++) {
-                struct blkfront_ring_info *rinfo = &info->rinfo[i];
+        for_each_rinfo(info, rinfo, i) {
                 struct grant *gnt_list_entry, *tmp;
 
                 spin_lock_irqsave(&rinfo->ring_lock, flags);
@@ -2720,11 +2756,10 @@
                 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
                                          node) {
                         if (gnt_list_entry->gref == GRANT_INVALID_REF ||
-                            gnttab_query_foreign_access(gnt_list_entry->gref))
+                            !gnttab_try_end_foreign_access(gnt_list_entry->gref))
                                 continue;
 
                         list_del(&gnt_list_entry->node);
-                        gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
                         rinfo->persistent_gnts_c--;
                         gnt_list_entry->gref = GRANT_INVALID_REF;
                         list_add_tail(&gnt_list_entry->node, &rinfo->grants);
@@ -2739,6 +2774,13 @@
         struct blkfront_info *info;
         bool need_schedule_work = false;
 
+        /*
+         * Note that when using bounce buffers but not persistent grants
+         * there's no need to run blkfront_delay_work because grants are
+         * revoked in blkif_completion or else an error is reported and the
+         * connection is closed.
+         */
+
         mutex_lock(&blkfront_mutex);
 
         list_for_each_entry(info, &info_list, info_list) {