old  new
 ..   ..
 43   43  #include <net/9p/transport.h>
 44   44
 45   45  #define XEN_9PFS_NUM_RINGS 2
 46       -#define XEN_9PFS_RING_ORDER 6
 47       -#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)
      46  +#define XEN_9PFS_RING_ORDER 9
      47  +#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)
 48   48
 49   49  struct xen_9pfs_header {
 50   50  uint32_t size;
 ..   ..
132  132  prod = ring->intf->out_prod;
133  133  virt_mb();
134  134
135       - return XEN_9PFS_RING_SIZE -
136       - xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;
     135  + return XEN_9PFS_RING_SIZE(ring) -
     136  + xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
137  137  }
138  138
139  139  static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
 ..   ..
167  167  prod = ring->intf->out_prod;
168  168  virt_mb();
169  169
170       - if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons,
171       - XEN_9PFS_RING_SIZE) < size) {
     170  + if (XEN_9PFS_RING_SIZE(ring) -
     171  + xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
172  172  spin_unlock_irqrestore(&ring->lock, flags);
173  173  goto again;
174  174  }
175  175
176       - masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
177       - masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
     176  + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
     177  + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
178  178
179  179  xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
180       - &masked_prod, masked_cons, XEN_9PFS_RING_SIZE);
     180  + &masked_prod, masked_cons,
     181  + XEN_9PFS_RING_SIZE(ring));
181  182
182  183  p9_req->status = REQ_STATUS_SENT;
183  184  virt_wmb(); /* write ring before updating pointer */
 ..   ..
207  208  prod = ring->intf->in_prod;
208  209  virt_rmb();
209  210
210       - if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) <
     211  + if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
211  212  sizeof(h)) {
212  213  notify_remote_via_irq(ring->irq);
213  214  return;
214  215  }
215  216
216       - masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
217       - masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
     217  + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
     218  + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
218  219
219  220  /* First, read just the header */
220  221  xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
221  222  masked_prod, &masked_cons,
222       - XEN_9PFS_RING_SIZE);
     223  + XEN_9PFS_RING_SIZE(ring));
223  224
224  225  req = p9_tag_lookup(priv->client, h.tag);
225  226  if (!req || req->status != REQ_STATUS_SENT) {
 ..   ..
230  231  continue;
231  232  }
232  233
     234  + if (h.size > req->rc.capacity) {
     235  + dev_warn(&priv->dev->dev,
     236  + "requested packet size too big: %d for tag %d with capacity %zd\n",
     237  + h.size, h.tag, req->rc.capacity);
     238  + req->status = REQ_STATUS_ERROR;
     239  + goto recv_error;
     240  + }
     241  +
233  242  memcpy(&req->rc, &h, sizeof(h));
234  243  req->rc.offset = 0;
235  244
236       - masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
     245  + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
237  246  /* Then, read the whole packet (including the header) */
238  247  xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
239  248  masked_prod, &masked_cons,
240       - XEN_9PFS_RING_SIZE);
     249  + XEN_9PFS_RING_SIZE(ring));
241  250
     251  +recv_error:
242  252  virt_mb();
243  253  cons += h.size;
244  254  ring->intf->in_cons = cons;
 ..   ..
267  277
268  278  static struct p9_trans_module p9_xen_trans = {
269  279  .name = "xen",
270       - .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT),
     280  + .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
271  281  .def = 1,
272  282  .create = p9_xen_create,
273  283  .close = p9_xen_close,
 ..   ..
290  300  write_unlock(&xen_9pfs_lock);
291  301
292  302  for (i = 0; i < priv->num_rings; i++) {
     303  + struct xen_9pfs_dataring *ring = &priv->rings[i];
     304  +
     305  + cancel_work_sync(&ring->work);
     306  +
293  307  if (!priv->rings[i].intf)
294  308  break;
295  309  if (priv->rings[i].irq > 0)
296  310  unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
297  311  if (priv->rings[i].data.in) {
298       - for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++) {
     312  + for (j = 0;
     313  + j < (1 << priv->rings[i].intf->ring_order);
     314  + j++) {
299  315  grant_ref_t ref;
300  316
301  317  ref = priv->rings[i].intf->ref[j];
302  318  gnttab_end_foreign_access(ref, 0, 0);
303  319  }
304       - free_pages((unsigned long)priv->rings[i].data.in,
305       - XEN_9PFS_RING_ORDER -
306       - (PAGE_SHIFT - XEN_PAGE_SHIFT));
     320  + free_pages_exact(priv->rings[i].data.in,
     321  + 1UL << (priv->rings[i].intf->ring_order +
     322  + XEN_PAGE_SHIFT));
307  323  }
308  324  gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
309  325  free_page((unsigned long)priv->rings[i].intf);
 ..   ..
323  339  }
324  340
325  341  static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
326       - struct xen_9pfs_dataring *ring)
     342  + struct xen_9pfs_dataring *ring,
     343  + unsigned int order)
327  344  {
328  345  int i = 0;
329  346  int ret = -ENOMEM;
 ..   ..
341  358  if (ret < 0)
342  359  goto out;
343  360  ring->ref = ret;
344       - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
345       - XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
     361  + bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
     362  + GFP_KERNEL | __GFP_ZERO);
346  363  if (!bytes) {
347  364  ret = -ENOMEM;
348  365  goto out;
349  366  }
350       - for (; i < (1 << XEN_9PFS_RING_ORDER); i++) {
     367  + for (; i < (1 << order); i++) {
351  368  ret = gnttab_grant_foreign_access(
352  369  dev->otherend_id, virt_to_gfn(bytes) + i, 0);
353  370  if (ret < 0)
354  371  goto out;
355  372  ring->intf->ref[i] = ret;
356  373  }
357       - ring->intf->ring_order = XEN_9PFS_RING_ORDER;
     374  + ring->intf->ring_order = order;
358  375  ring->data.in = bytes;
359       - ring->data.out = bytes + XEN_9PFS_RING_SIZE;
     376  + ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);
360  377
361  378  ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
362  379  if (ret)
 ..   ..
373  390  if (bytes) {
374  391  for (i--; i >= 0; i--)
375  392  gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
376       - free_pages((unsigned long)bytes,
377       - XEN_9PFS_RING_ORDER -
378       - (PAGE_SHIFT - XEN_PAGE_SHIFT));
     393  + free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
379  394  }
380  395  gnttab_end_foreign_access(ring->ref, 0, 0);
381  396  free_page((unsigned long)ring->intf);
382  397  return ret;
383  398  }
384  399
385       -static int xen_9pfs_front_probe(struct xenbus_device *dev,
386       - const struct xenbus_device_id *id)
     400  +static int xen_9pfs_front_init(struct xenbus_device *dev)
387  401  {
388  402  int ret, i;
389  403  struct xenbus_transaction xbt;
390       - struct xen_9pfs_front_priv *priv = NULL;
391       - char *versions;
     404  + struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
     405  + char *versions, *v;
392  406  unsigned int max_rings, max_ring_order, len = 0;
393  407
394  408  versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
395  409  if (IS_ERR(versions))
396  410  return PTR_ERR(versions);
397       - if (strcmp(versions, "1")) {
     411  + for (v = versions; *v; v++) {
     412  + if (simple_strtoul(v, &v, 10) == 1) {
     413  + v = NULL;
     414  + break;
     415  + }
     416  + }
     417  + if (v) {
398  418  kfree(versions);
399  419  return -EINVAL;
400  420  }
 ..   ..
404  424  return -EINVAL;
405  425  max_ring_order = xenbus_read_unsigned(dev->otherend,
406  426  "max-ring-page-order", 0);
407       - if (max_ring_order < XEN_9PFS_RING_ORDER)
408       - return -EINVAL;
     427  + if (max_ring_order > XEN_9PFS_RING_ORDER)
     428  + max_ring_order = XEN_9PFS_RING_ORDER;
     429  + if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
     430  + p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
409  431
410       - priv = kzalloc(sizeof(*priv), GFP_KERNEL);
411       - if (!priv)
412       - return -ENOMEM;
413       -
414       - priv->dev = dev;
415  432  priv->num_rings = XEN_9PFS_NUM_RINGS;
416  433  priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
417  434  GFP_KERNEL);
 ..   ..
422  439
423  440  for (i = 0; i < priv->num_rings; i++) {
424  441  priv->rings[i].priv = priv;
425       - ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
     442  + ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
     443  + max_ring_order);
426  444  if (ret < 0)
427  445  goto error;
428  446  }
 ..   ..
444  462  char str[16];
445  463
446  464  BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
447       - sprintf(str, "ring-ref%u", i);
     465  + sprintf(str, "ring-ref%d", i);
448  466  ret = xenbus_printf(xbt, dev->nodename, str, "%d",
449  467  priv->rings[i].ref);
450  468  if (ret)
451  469  goto error_xenbus;
452  470
453       - sprintf(str, "event-channel-%u", i);
     471  + sprintf(str, "event-channel-%d", i);
454  472  ret = xenbus_printf(xbt, dev->nodename, str, "%u",
455  473  priv->rings[i].evtchn);
456  474  if (ret)
 ..   ..
469  487  goto error;
470  488  }
471  489
472       - write_lock(&xen_9pfs_lock);
473       - list_add_tail(&priv->list, &xen_9pfs_devs);
474       - write_unlock(&xen_9pfs_lock);
475       - dev_set_drvdata(&dev->dev, priv);
476       - xenbus_switch_state(dev, XenbusStateInitialised);
477       -
478  490  return 0;
479  491
480  492  error_xenbus:
481  493  xenbus_transaction_end(xbt, 1);
482  494  xenbus_dev_fatal(dev, ret, "writing xenstore");
483  495  error:
484       - dev_set_drvdata(&dev->dev, NULL);
485  496  xen_9pfs_front_free(priv);
486  497  return ret;
     498  +}
     499  +
     500  +static int xen_9pfs_front_probe(struct xenbus_device *dev,
     501  + const struct xenbus_device_id *id)
     502  +{
     503  + struct xen_9pfs_front_priv *priv = NULL;
     504  +
     505  + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
     506  + if (!priv)
     507  + return -ENOMEM;
     508  +
     509  + priv->dev = dev;
     510  + dev_set_drvdata(&dev->dev, priv);
     511  +
     512  + write_lock(&xen_9pfs_lock);
     513  + list_add_tail(&priv->list, &xen_9pfs_devs);
     514  + write_unlock(&xen_9pfs_lock);
     515  +
     516  + return 0;
487  517  }
488  518
489  519  static int xen_9pfs_front_resume(struct xenbus_device *dev)
 ..   ..
504  534  break;
505  535
506  536  case XenbusStateInitWait:
     537  + if (!xen_9pfs_front_init(dev))
     538  + xenbus_switch_state(dev, XenbusStateInitialised);
507  539  break;
508  540
509  541  case XenbusStateConnected:
 ..   ..
513  545  case XenbusStateClosed:
514  546  if (dev->state == XenbusStateClosed)
515  547  break;
516       - /* Missed the backend's CLOSING state -- fallthrough */
     548  + fallthrough; /* Missed the backend's CLOSING state */
517  549  case XenbusStateClosing:
518  550  xenbus_frontend_closed(dev);
519  551  break;