2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/9p/trans_xen.c
@@ -43,8 +43,8 @@
 #include <net/9p/transport.h>
 
 #define XEN_9PFS_NUM_RINGS 2
-#define XEN_9PFS_RING_ORDER 6
-#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)
+#define XEN_9PFS_RING_ORDER 9
+#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)
 
 struct xen_9pfs_header {
 	uint32_t size;
@@ -132,8 +132,8 @@
 	prod = ring->intf->out_prod;
 	virt_mb();
 
-	return XEN_9PFS_RING_SIZE -
-		xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;
+	return XEN_9PFS_RING_SIZE(ring) -
+		xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
 }
 
 static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
@@ -167,17 +167,18 @@
 	prod = ring->intf->out_prod;
 	virt_mb();
 
-	if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons,
-						 XEN_9PFS_RING_SIZE) < size) {
+	if (XEN_9PFS_RING_SIZE(ring) -
+	    xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
 		spin_unlock_irqrestore(&ring->lock, flags);
 		goto again;
 	}
 
-	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
-	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
+	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
 
 	xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
-			      &masked_prod, masked_cons, XEN_9PFS_RING_SIZE);
+			      &masked_prod, masked_cons,
+			      XEN_9PFS_RING_SIZE(ring));
 
 	p9_req->status = REQ_STATUS_SENT;
 	virt_wmb();	/* write ring before updating pointer */
@@ -207,19 +208,19 @@
 		prod = ring->intf->in_prod;
 		virt_rmb();
 
-		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) <
+		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
 		    sizeof(h)) {
 			notify_remote_via_irq(ring->irq);
 			return;
 		}
 
-		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
-		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
+		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
 
 		/* First, read just the header */
 		xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
 				     masked_prod, &masked_cons,
-				     XEN_9PFS_RING_SIZE);
+				     XEN_9PFS_RING_SIZE(ring));
 
 		req = p9_tag_lookup(priv->client, h.tag);
 		if (!req || req->status != REQ_STATUS_SENT) {
@@ -230,15 +231,24 @@
 			continue;
 		}
 
+		if (h.size > req->rc.capacity) {
+			dev_warn(&priv->dev->dev,
+				 "requested packet size too big: %d for tag %d with capacity %zd\n",
+				 h.size, h.tag, req->rc.capacity);
+			req->status = REQ_STATUS_ERROR;
+			goto recv_error;
+		}
+
 		memcpy(&req->rc, &h, sizeof(h));
 		req->rc.offset = 0;
 
-		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
 		/* Then, read the whole packet (including the header) */
 		xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
 				     masked_prod, &masked_cons,
-				     XEN_9PFS_RING_SIZE);
+				     XEN_9PFS_RING_SIZE(ring));
 
+recv_error:
 		virt_mb();
 		cons += h.size;
 		ring->intf->in_cons = cons;
@@ -267,7 +277,7 @@
 
 static struct p9_trans_module p9_xen_trans = {
 	.name = "xen",
-	.maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT),
+	.maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
 	.def = 1,
 	.create = p9_xen_create,
 	.close = p9_xen_close,
@@ -290,20 +300,26 @@
 	write_unlock(&xen_9pfs_lock);
 
 	for (i = 0; i < priv->num_rings; i++) {
+		struct xen_9pfs_dataring *ring = &priv->rings[i];
+
+		cancel_work_sync(&ring->work);
+
 		if (!priv->rings[i].intf)
 			break;
 		if (priv->rings[i].irq > 0)
 			unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
 		if (priv->rings[i].data.in) {
-			for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++) {
+			for (j = 0;
+			     j < (1 << priv->rings[i].intf->ring_order);
+			     j++) {
 				grant_ref_t ref;
 
 				ref = priv->rings[i].intf->ref[j];
 				gnttab_end_foreign_access(ref, 0, 0);
 			}
-			free_pages((unsigned long)priv->rings[i].data.in,
-				   XEN_9PFS_RING_ORDER -
-				   (PAGE_SHIFT - XEN_PAGE_SHIFT));
+			free_pages_exact(priv->rings[i].data.in,
+					 1UL << (priv->rings[i].intf->ring_order +
+						 XEN_PAGE_SHIFT));
 		}
 		gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
 		free_page((unsigned long)priv->rings[i].intf);
@@ -323,7 +339,8 @@
 }
 
 static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
-					 struct xen_9pfs_dataring *ring)
+					 struct xen_9pfs_dataring *ring,
+					 unsigned int order)
 {
 	int i = 0;
 	int ret = -ENOMEM;
@@ -341,22 +358,22 @@
 	if (ret < 0)
 		goto out;
 	ring->ref = ret;
-	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-			XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
+	bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
+				  GFP_KERNEL | __GFP_ZERO);
 	if (!bytes) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	for (; i < (1 << XEN_9PFS_RING_ORDER); i++) {
+	for (; i < (1 << order); i++) {
 		ret = gnttab_grant_foreign_access(
 				dev->otherend_id, virt_to_gfn(bytes) + i, 0);
 		if (ret < 0)
 			goto out;
 		ring->intf->ref[i] = ret;
 	}
-	ring->intf->ring_order = XEN_9PFS_RING_ORDER;
+	ring->intf->ring_order = order;
 	ring->data.in = bytes;
-	ring->data.out = bytes + XEN_9PFS_RING_SIZE;
+	ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);
 
 	ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
 	if (ret)
@@ -373,28 +390,31 @@
 	if (bytes) {
 		for (i--; i >= 0; i--)
 			gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
-		free_pages((unsigned long)bytes,
-			   XEN_9PFS_RING_ORDER -
-			   (PAGE_SHIFT - XEN_PAGE_SHIFT));
+		free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
 	}
 	gnttab_end_foreign_access(ring->ref, 0, 0);
 	free_page((unsigned long)ring->intf);
 	return ret;
 }
 
-static int xen_9pfs_front_probe(struct xenbus_device *dev,
-				const struct xenbus_device_id *id)
+static int xen_9pfs_front_init(struct xenbus_device *dev)
 {
 	int ret, i;
 	struct xenbus_transaction xbt;
-	struct xen_9pfs_front_priv *priv = NULL;
-	char *versions;
+	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
+	char *versions, *v;
 	unsigned int max_rings, max_ring_order, len = 0;
 
 	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
 	if (IS_ERR(versions))
 		return PTR_ERR(versions);
-	if (strcmp(versions, "1")) {
+	for (v = versions; *v; v++) {
+		if (simple_strtoul(v, &v, 10) == 1) {
+			v = NULL;
+			break;
+		}
+	}
+	if (v) {
 		kfree(versions);
 		return -EINVAL;
 	}
@@ -404,14 +424,11 @@
 		return -EINVAL;
 	max_ring_order = xenbus_read_unsigned(dev->otherend,
 					      "max-ring-page-order", 0);
-	if (max_ring_order < XEN_9PFS_RING_ORDER)
-		return -EINVAL;
+	if (max_ring_order > XEN_9PFS_RING_ORDER)
+		max_ring_order = XEN_9PFS_RING_ORDER;
+	if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
+		p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	priv->dev = dev;
 	priv->num_rings = XEN_9PFS_NUM_RINGS;
 	priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
 			      GFP_KERNEL);
@@ -422,7 +439,8 @@
 
 	for (i = 0; i < priv->num_rings; i++) {
 		priv->rings[i].priv = priv;
-		ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
+		ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
+						    max_ring_order);
 		if (ret < 0)
 			goto error;
 	}
@@ -444,13 +462,13 @@
 		char str[16];
 
 		BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
-		sprintf(str, "ring-ref%u", i);
+		sprintf(str, "ring-ref%d", i);
 		ret = xenbus_printf(xbt, dev->nodename, str, "%d",
 				    priv->rings[i].ref);
 		if (ret)
 			goto error_xenbus;
 
-		sprintf(str, "event-channel-%u", i);
+		sprintf(str, "event-channel-%d", i);
 		ret = xenbus_printf(xbt, dev->nodename, str, "%u",
 				    priv->rings[i].evtchn);
 		if (ret)
@@ -469,21 +487,33 @@
 		goto error;
 	}
 
-	write_lock(&xen_9pfs_lock);
-	list_add_tail(&priv->list, &xen_9pfs_devs);
-	write_unlock(&xen_9pfs_lock);
-	dev_set_drvdata(&dev->dev, priv);
-	xenbus_switch_state(dev, XenbusStateInitialised);
-
 	return 0;
 
 error_xenbus:
 	xenbus_transaction_end(xbt, 1);
 	xenbus_dev_fatal(dev, ret, "writing xenstore");
 error:
-	dev_set_drvdata(&dev->dev, NULL);
 	xen_9pfs_front_free(priv);
 	return ret;
+}
+
+static int xen_9pfs_front_probe(struct xenbus_device *dev,
+				const struct xenbus_device_id *id)
+{
+	struct xen_9pfs_front_priv *priv = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	dev_set_drvdata(&dev->dev, priv);
+
+	write_lock(&xen_9pfs_lock);
+	list_add_tail(&priv->list, &xen_9pfs_devs);
+	write_unlock(&xen_9pfs_lock);
+
+	return 0;
 }
 
 static int xen_9pfs_front_resume(struct xenbus_device *dev)
@@ -504,6 +534,8 @@
 		break;
 
 	case XenbusStateInitWait:
+		if (!xen_9pfs_front_init(dev))
+			xenbus_switch_state(dev, XenbusStateInitialised);
 		break;
 
 	case XenbusStateConnected:
@@ -513,7 +545,7 @@
 	case XenbusStateClosed:
 		if (dev->state == XenbusStateClosed)
 			break;
-		/* Missed the backend's CLOSING state -- fallthrough */
+		fallthrough;	/* Missed the backend's CLOSING state */
 	case XenbusStateClosing:
 		xenbus_frontend_closed(dev);
 		break;