
hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
--- a/kernel/net/9p/trans_xen.c
+++ b/kernel/net/9p/trans_xen.c
@@ -43,8 +43,8 @@
 #include <net/9p/transport.h>
 
 #define XEN_9PFS_NUM_RINGS 2
-#define XEN_9PFS_RING_ORDER 6
-#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)
+#define XEN_9PFS_RING_ORDER 9
+#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)
 
 struct xen_9pfs_header {
         uint32_t size;
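The first hunk replaces the compile-time ring size with a per-ring macro that reads the order negotiated into the shared interface page, and raises the default order from 6 to 9. A minimal sketch of the size arithmetic, assuming the standard Xen definition of XEN_FLEX_RING_SIZE, where the (1 << order) granted pages are split evenly between the in and out rings (the SKETCH_ names below are invented for illustration):

/* Sketch only: mirrors the Xen public ring header, where a flex ring
 * of (1 << order) Xen pages gives each direction half the data area.
 */
#define SKETCH_XEN_PAGE_SHIFT 12                     /* 4 KiB Xen pages */
#define SKETCH_FLEX_RING_SIZE(order) \
        (1UL << ((order) + SKETCH_XEN_PAGE_SHIFT - 1))

/* order 6:  64 pages shared -> 128 KiB per direction (old default)
 * order 9: 512 pages shared -> 1 MiB per direction (new default)
 */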
@@ -132,8 +132,8 @@
         prod = ring->intf->out_prod;
         virt_mb();
 
-        return XEN_9PFS_RING_SIZE -
-                xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;
+        return XEN_9PFS_RING_SIZE(ring) -
+                xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
 }
 
 static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
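The free-space test now measures against the negotiated per-ring size. As a rough model, assuming xen_9pfs_queued() returns the number of bytes between the free-running cons and prod indices (the names below are illustrative, not the driver's helpers):

/* Illustrative model: the indices grow without bound and are only
 * masked when the buffer is actually addressed, so unsigned
 * subtraction gives the queued byte count even across wraparound.
 */
static int ring_has_room(unsigned int prod, unsigned int cons,
                         unsigned int ring_size, unsigned int size)
{
        unsigned int queued = prod - cons;   /* bytes not yet consumed */

        return ring_size - queued >= size;   /* does the request fit? */
}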
@@ -167,17 +167,18 @@
         prod = ring->intf->out_prod;
         virt_mb();
 
-        if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons,
-                                                 XEN_9PFS_RING_SIZE) < size) {
+        if (XEN_9PFS_RING_SIZE(ring) -
+            xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
                 spin_unlock_irqrestore(&ring->lock, flags);
                 goto again;
         }
 
-        masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
-        masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+        masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
+        masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
 
         xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
-                              &masked_prod, masked_cons, XEN_9PFS_RING_SIZE);
+                              &masked_prod, masked_cons,
+                              XEN_9PFS_RING_SIZE(ring));
 
         p9_req->status = REQ_STATUS_SENT;
         virt_wmb(); /* write ring before updating pointer */
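Every masking call in the request path gains the ring argument as well. The masking only works because each ring size is a power of two; a sketch of the usual helper under that assumption:

/* Sketch: with ring_size == 1UL << (order + XEN_PAGE_SHIFT - 1), a
 * bitwise AND wraps a free-running index back into the data buffer.
 */
static inline unsigned int ring_mask(unsigned int idx, unsigned int ring_size)
{
        return idx & (ring_size - 1);
}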
@@ -207,19 +208,19 @@
                 prod = ring->intf->in_prod;
                 virt_rmb();
 
-                if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) <
+                if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
                     sizeof(h)) {
                         notify_remote_via_irq(ring->irq);
                         return;
                 }
 
-                masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
-                masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+                masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
+                masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
 
                 /* First, read just the header */
                 xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
                                      masked_prod, &masked_cons,
-                                     XEN_9PFS_RING_SIZE);
+                                     XEN_9PFS_RING_SIZE(ring));
 
                 req = p9_tag_lookup(priv->client, h.tag);
                 if (!req || req->status != REQ_STATUS_SENT) {
@@ -230,15 +231,24 @@
                         continue;
                 }
 
+                if (h.size > req->rc.capacity) {
+                        dev_warn(&priv->dev->dev,
+                                 "requested packet size too big: %d for tag %d with capacity %zd\n",
+                                 h.size, h.tag, req->rc.capacity);
+                        req->status = REQ_STATUS_ERROR;
+                        goto recv_error;
+                }
+
                 memcpy(&req->rc, &h, sizeof(h));
                 req->rc.offset = 0;
 
-                masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+                masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
                 /* Then, read the whole packet (including the header) */
                 xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
                                      masked_prod, &masked_cons,
-                                     XEN_9PFS_RING_SIZE);
+                                     XEN_9PFS_RING_SIZE(ring));
 
+recv_error:
                 virt_mb();
                 cons += h.size;
                 ring->intf->in_cons = cons;
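The new h.size check is the security-relevant part of this hunk: the header size comes from the backend and was previously passed to xen_9pfs_read_packet() unchecked, so a misbehaving backend could write past the reply buffer. On failure the code still advances cons past the advertised size via the recv_error label, keeping the ring indices consistent with the backend's view. The underlying pattern, in an illustrative form with hypothetical names:

#include <string.h>

/* Illustrative pattern only: validate a length supplied by the
 * untrusted peer before copying into a fixed-capacity buffer.
 */
struct reply {
        size_t capacity;   /* bytes available in sdata */
        char  *sdata;
};

static int copy_reply(struct reply *rc, const char *src, size_t size)
{
        if (size > rc->capacity)
                return -1;          /* reject; caller must still consume */
        memcpy(rc->sdata, src, size);
        return 0;
}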
@@ -267,7 +277,7 @@
 
 static struct p9_trans_module p9_xen_trans = {
         .name = "xen",
-        .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT),
+        .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
         .def = 1,
         .create = p9_xen_create,
         .close = p9_xen_close,
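The new maxsize follows from the flex-ring split: each direction holds 1 << (order + XEN_PAGE_SHIFT - 1) bytes, and the transport caps a single message at half of that, hence the combined "- 2" in the shift. Worked numbers, assuming XEN_PAGE_SHIFT == 12:

#include <stdio.h>

int main(void)
{
        const unsigned int order = 9, xen_page_shift = 12;
        unsigned long per_dir = 1UL << (order + xen_page_shift - 1);
        unsigned long maxsize = 1UL << (order + xen_page_shift - 2);

        printf("per direction: %lu\n", per_dir);   /* 1048576 (1 MiB)  */
        printf("maxsize:       %lu\n", maxsize);   /* 524288 (512 KiB) */
        return 0;
}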
@@ -295,15 +305,17 @@
                 if (priv->rings[i].irq > 0)
                         unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
                 if (priv->rings[i].data.in) {
-                        for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++) {
+                        for (j = 0;
+                             j < (1 << priv->rings[i].intf->ring_order);
+                             j++) {
                                 grant_ref_t ref;
 
                                 ref = priv->rings[i].intf->ref[j];
                                 gnttab_end_foreign_access(ref, 0, 0);
                         }
-                        free_pages((unsigned long)priv->rings[i].data.in,
-                                   XEN_9PFS_RING_ORDER -
-                                   (PAGE_SHIFT - XEN_PAGE_SHIFT));
+                        free_pages_exact(priv->rings[i].data.in,
+                                         1UL << (priv->rings[i].intf->ring_order +
+                                                 XEN_PAGE_SHIFT));
                 }
                 gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
                 free_page((unsigned long)priv->rings[i].intf);
@@ -323,7 +335,8 @@
 }
 
 static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
-                                         struct xen_9pfs_dataring *ring)
+                                         struct xen_9pfs_dataring *ring,
+                                         unsigned int order)
 {
         int i = 0;
         int ret = -ENOMEM;
@@ -341,22 +354,22 @@
         if (ret < 0)
                 goto out;
         ring->ref = ret;
-        bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                        XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
+        bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
+                                  GFP_KERNEL | __GFP_ZERO);
         if (!bytes) {
                 ret = -ENOMEM;
                 goto out;
         }
-        for (; i < (1 << XEN_9PFS_RING_ORDER); i++) {
+        for (; i < (1 << order); i++) {
                 ret = gnttab_grant_foreign_access(
                                 dev->otherend_id, virt_to_gfn(bytes) + i, 0);
                 if (ret < 0)
                         goto out;
                 ring->intf->ref[i] = ret;
         }
-        ring->intf->ring_order = XEN_9PFS_RING_ORDER;
+        ring->intf->ring_order = order;
         ring->data.in = bytes;
-        ring->data.out = bytes + XEN_9PFS_RING_SIZE;
+        ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);
 
         ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
         if (ret)
@@ -373,9 +386,7 @@
         if (bytes) {
                 for (i--; i >= 0; i--)
                         gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
-                free_pages((unsigned long)bytes,
-                           XEN_9PFS_RING_ORDER -
-                           (PAGE_SHIFT - XEN_PAGE_SHIFT));
+                free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
         }
         gnttab_end_foreign_access(ring->ref, 0, 0);
         free_page((unsigned long)ring->intf);
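Allocation and both teardown paths switch from __get_free_pages()/free_pages(), whose order argument counts CPU pages and therefore needed the PAGE_SHIFT - XEN_PAGE_SHIFT correction, to alloc_pages_exact()/free_pages_exact(), which take a byte count. A kernel-context sketch of the pairing (the helper names are hypothetical):

#include <linux/mm.h>   /* alloc_pages_exact(), free_pages_exact() */
#include <xen/page.h>   /* XEN_PAGE_SHIFT */

static void *ring_buf_alloc(unsigned int order)
{
        /* size in bytes: (1 << order) Xen pages, no CPU-page conversion */
        return alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
                                 GFP_KERNEL | __GFP_ZERO);
}

static void ring_buf_free(void *buf, unsigned int order)
{
        free_pages_exact(buf, 1UL << (order + XEN_PAGE_SHIFT));
}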
@@ -404,8 +415,10 @@
                 return -EINVAL;
         max_ring_order = xenbus_read_unsigned(dev->otherend,
                                               "max-ring-page-order", 0);
-        if (max_ring_order < XEN_9PFS_RING_ORDER)
-                return -EINVAL;
+        if (max_ring_order > XEN_9PFS_RING_ORDER)
+                max_ring_order = XEN_9PFS_RING_ORDER;
+        if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
+                p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
 
         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
         if (!priv)
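Probe no longer fails when the backend advertises a smaller max-ring-page-order than the frontend's default; it clamps to the smaller of the two and, if needed, shrinks maxsize to half the resulting per-direction ring so a single message always fits. Roughly, with invented names standing in for the driver's globals:

/* Illustrative clamp; flex_ring_size() stands in for
 * XEN_FLEX_RING_SIZE() and assumes 4 KiB Xen pages.
 */
static unsigned long flex_ring_size(unsigned int order)
{
        return 1UL << (order + 12 - 1);
}

static unsigned int negotiate_order(unsigned int backend_max,
                                    unsigned int frontend_max,
                                    unsigned long *maxsize)
{
        unsigned int order = backend_max;

        if (order > frontend_max)
                order = frontend_max;
        if (*maxsize > flex_ring_size(order))
                *maxsize = flex_ring_size(order) / 2;
        return order;
}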
@@ -422,7 +435,8 @@
 
         for (i = 0; i < priv->num_rings; i++) {
                 priv->rings[i].priv = priv;
-                ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
+                ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
+                                                    max_ring_order);
                 if (ret < 0)
                         goto error;
         }
@@ -444,13 +458,13 @@
                 char str[16];
 
                 BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
-                sprintf(str, "ring-ref%u", i);
+                sprintf(str, "ring-ref%d", i);
                 ret = xenbus_printf(xbt, dev->nodename, str, "%d",
                                     priv->rings[i].ref);
                 if (ret)
                         goto error_xenbus;
 
-                sprintf(str, "event-channel-%u", i);
+                sprintf(str, "event-channel-%d", i);
                 ret = xenbus_printf(xbt, dev->nodename, str, "%u",
                                     priv->rings[i].evtchn);
                 if (ret)
@@ -513,7 +527,7 @@
         case XenbusStateClosed:
                 if (dev->state == XenbusStateClosed)
                         break;
-                /* Missed the backend's CLOSING state -- fallthrough */
+                fallthrough;    /* Missed the backend's CLOSING state */
         case XenbusStateClosing:
                 xenbus_frontend_closed(dev);
                 break;
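The final hunk converts the fall-through comment to the kernel's fallthrough pseudo-keyword (from linux/compiler_attributes.h), which expands to __attribute__((__fallthrough__)) where supported, so -Wimplicit-fallthrough sees an explicit annotation instead of having to parse comments. A minimal usage sketch outside the driver, with an invented enum and handler:

#ifndef fallthrough                     /* kernel macro; fall back for GCC/Clang */
#define fallthrough __attribute__((__fallthrough__))
#endif

enum conn_state { ST_CLOSED, ST_CLOSING };

static void handle(enum conn_state s, int already_closed)
{
        switch (s) {
        case ST_CLOSED:
                if (already_closed)
                        break;
                fallthrough;    /* treat a missed CLOSING the same way */
        case ST_CLOSING:
                /* ... close the frontend ... */
                break;
        }
}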