| 1 | +// SPDX-License-Identifier: GPL-2.0-only
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * linux/fs/nfs/write.c |
|---|
| 3 | 4 | * |
|---|
| .. | .. |
|---|
| 26 | 27 | #include <linux/iversion.h> |
|---|
| 27 | 28 | |
|---|
| 28 | 29 | #include <linux/uaccess.h> |
|---|
| 30 | +#include <linux/sched/mm.h> |
|---|
| 29 | 31 | |
|---|
| 30 | 32 | #include "delegation.h" |
|---|
| 31 | 33 | #include "internal.h" |
|---|
| .. | .. |
|---|
| 55 | 57 | static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; |
|---|
| 56 | 58 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops; |
|---|
| 57 | 59 | static const struct nfs_rw_ops nfs_rw_write_ops; |
|---|
| 58 | | -static void nfs_clear_request_commit(struct nfs_page *req); |
|---|
| 60 | +static void nfs_inode_remove_request(struct nfs_page *req); |
|---|
| 61 | +static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, |
|---|
| 62 | + struct nfs_page *req); |
|---|
| 59 | 63 | static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, |
|---|
| 60 | 64 | struct inode *inode); |
|---|
| 61 | 65 | static struct nfs_page * |
|---|
| .. | .. |
|---|
| 67 | 71 | static struct kmem_cache *nfs_cdata_cachep; |
|---|
| 68 | 72 | static mempool_t *nfs_commit_mempool; |
|---|
| 69 | 73 | |
|---|
| 70 | | -struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail) |
|---|
| 74 | +struct nfs_commit_data *nfs_commitdata_alloc(void) |
|---|
| 71 | 75 | { |
|---|
| 72 | 76 | struct nfs_commit_data *p; |
|---|
| 73 | 77 | |
|---|
| 74 | | - if (never_fail) |
|---|
| 75 | | - p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); |
|---|
| 76 | | - else { |
|---|
| 77 | | - /* It is OK to do some reclaim, but not safe to wait
|---|
| 78 | | - * for anything to be returned to the pool. |
|---|
| 79 | | - * mempool_alloc() cannot handle that particular combination, |
|---|
| 80 | | - * so we need two separate attempts. |
|---|
| 81 | | - */ |
|---|
| 78 | + p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask()); |
|---|
| 79 | + if (!p) { |
|---|
| 82 | 80 | p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); |
|---|
| 83 | 81 | if (!p) |
|---|
| 84 | | - p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO | |
|---|
| 85 | | - __GFP_NOWARN | __GFP_NORETRY); |
|---|
| 86 | | - if (!p) |
|---|
| 87 | 82 | return NULL; |
|---|
| 83 | + memset(p, 0, sizeof(*p));
|---|
| 88 | 84 | } |
|---|
| 89 | | - |
|---|
| 90 | | - memset(p, 0, sizeof(*p)); |
|---|
| 91 | 85 | INIT_LIST_HEAD(&p->pages); |
|---|
| 92 | 86 | return p; |
|---|
| 93 | 87 | } |
|---|
| .. | .. |
|---|
| 101 | 95 | |
|---|
| 102 | 96 | static struct nfs_pgio_header *nfs_writehdr_alloc(void) |
|---|
| 103 | 97 | { |
|---|
| 104 | | - struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); |
|---|
| 98 | + struct nfs_pgio_header *p; |
|---|
| 105 | 99 | |
|---|
| 106 | | - memset(p, 0, sizeof(*p)); |
|---|
| 100 | + p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask()); |
|---|
| 101 | + if (!p) { |
|---|
| 102 | + p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT); |
|---|
| 103 | + if (!p) |
|---|
| 104 | + return NULL; |
|---|
| 105 | + memset(p, 0, sizeof(*p)); |
|---|
| 106 | + } |
|---|
| 107 | 107 | p->rw_mode = FMODE_WRITE; |
|---|
| 108 | 108 | return p; |
|---|
| 109 | 109 | } |
|---|
| .. | .. |
|---|
| 144 | 144 | { |
|---|
| 145 | 145 | if (ioc != NULL) |
|---|
| 146 | 146 | kref_put(&ioc->refcount, nfs_io_completion_release); |
|---|
| 147 | +} |
|---|
| 148 | + |
|---|
| 149 | +static void |
|---|
| 150 | +nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode) |
|---|
| 151 | +{ |
|---|
| 152 | + if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) { |
|---|
| 153 | + kref_get(&req->wb_kref); |
|---|
| 154 | + atomic_long_inc(&NFS_I(inode)->nrequests); |
|---|
| 155 | + } |
|---|
| 156 | +} |
|---|
| 157 | + |
|---|
| 158 | +static int |
|---|
| 159 | +nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) |
|---|
| 160 | +{ |
|---|
| 161 | + int ret; |
|---|
| 162 | + |
|---|
| 163 | + if (!test_bit(PG_REMOVE, &req->wb_flags)) |
|---|
| 164 | + return 0; |
|---|
| 165 | + ret = nfs_page_group_lock(req); |
|---|
| 166 | + if (ret) |
|---|
| 167 | + return ret; |
|---|
| 168 | + if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) |
|---|
| 169 | + nfs_page_set_inode_ref(req, inode); |
|---|
| 170 | + nfs_page_group_unlock(req); |
|---|
| 171 | + return 0; |
|---|
| 147 | 172 | } |
|---|
| 148 | 173 | |
|---|
| 149 | 174 | static struct nfs_page * |
|---|
| .. | .. |
|---|
| 215 | 240 | return req; |
|---|
| 216 | 241 | } |
|---|
| 217 | 242 | |
|---|
| 243 | +static struct nfs_page *nfs_find_and_lock_page_request(struct page *page) |
|---|
| 244 | +{ |
|---|
| 245 | + struct inode *inode = page_file_mapping(page)->host; |
|---|
| 246 | + struct nfs_page *req, *head; |
|---|
| 247 | + int ret; |
|---|
| 248 | + |
|---|
| 249 | + for (;;) { |
|---|
| 250 | + req = nfs_page_find_head_request(page); |
|---|
| 251 | + if (!req) |
|---|
| 252 | + return req; |
|---|
| 253 | + head = nfs_page_group_lock_head(req); |
|---|
| 254 | + if (head != req) |
|---|
| 255 | + nfs_release_request(req); |
|---|
| 256 | + if (IS_ERR(head)) |
|---|
| 257 | + return head; |
|---|
| 258 | + ret = nfs_cancel_remove_inode(head, inode); |
|---|
| 259 | + if (ret < 0) { |
|---|
| 260 | + nfs_unlock_and_release_request(head); |
|---|
| 261 | + return ERR_PTR(ret); |
|---|
| 262 | + } |
|---|
| 263 | + /* Ensure that nobody removed the request before we locked it */ |
|---|
| 264 | + if (head == nfs_page_private_request(page)) |
|---|
| 265 | + break; |
|---|
| 266 | + if (PageSwapCache(page)) |
|---|
| 267 | + break; |
|---|
| 268 | + nfs_unlock_and_release_request(head); |
|---|
| 269 | + } |
|---|
| 270 | + return head; |
|---|
| 271 | +} |
|---|
| 272 | + |
|---|
| 218 | 273 | /* Adjust the file length if we're writing beyond the end */ |
|---|
| 219 | 274 | static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) |
|---|
| 220 | 275 | { |
|---|
| .. | .. |
|---|
| 249 | 304 | NFS_INO_REVAL_PAGECACHE | |
|---|
| 250 | 305 | NFS_INO_INVALID_SIZE; |
|---|
| 251 | 306 | spin_unlock(&inode->i_lock); |
|---|
| 307 | +} |
|---|
| 308 | + |
|---|
| 309 | +static void nfs_mapping_set_error(struct page *page, int error) |
|---|
| 310 | +{ |
|---|
| 311 | + struct address_space *mapping = page_file_mapping(page); |
|---|
| 312 | + |
|---|
| 313 | + SetPageError(page); |
|---|
| 314 | + filemap_set_wb_err(mapping, error); |
|---|
| 315 | + if (mapping->host) |
|---|
| 316 | + errseq_set(&mapping->host->i_sb->s_wb_err, |
|---|
| 317 | + error == -ENOSPC ? -ENOSPC : -EIO); |
|---|
| 318 | + nfs_set_pageerror(mapping); |
|---|
| 252 | 319 | } |
|---|
| 253 | 320 | |
|---|
| 254 | 321 | /* |
|---|
| .. | .. |
|---|
| 368 | 435 | } |
|---|
| 369 | 436 | |
|---|
| 370 | 437 | /* |
|---|
| 371 | | - * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req |
|---|
| 372 | | - * |
|---|
| 373 | | - * this is a helper function for nfs_lock_and_join_requests |
|---|
| 374 | | - * |
|---|
| 375 | | - * @inode - inode associated with request page group, must be holding inode lock |
|---|
| 376 | | - * @head - head request of page group, must be holding head lock |
|---|
| 377 | | - * @req - request that couldn't lock and needs to wait on the req bit lock |
|---|
| 378 | | - * |
|---|
| 379 | | - * NOTE: this must be called holding page_group bit lock |
|---|
| 380 | | - * which will be released before returning. |
|---|
| 381 | | - * |
|---|
| 382 | | - * returns 0 on success, < 0 on error. |
|---|
| 383 | | - */ |
|---|
| 384 | | -static void |
|---|
| 385 | | -nfs_unroll_locks(struct inode *inode, struct nfs_page *head, |
|---|
| 386 | | - struct nfs_page *req) |
|---|
| 387 | | -{ |
|---|
| 388 | | - struct nfs_page *tmp; |
|---|
| 389 | | - |
|---|
| 390 | | - /* relinquish all the locks successfully grabbed this run */ |
|---|
| 391 | | - for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { |
|---|
| 392 | | - if (!kref_read(&tmp->wb_kref)) |
|---|
| 393 | | - continue; |
|---|
| 394 | | - nfs_unlock_and_release_request(tmp); |
|---|
| 395 | | - } |
|---|
| 396 | | -} |
|---|
| 397 | | - |
|---|
| 398 | | -/* |
|---|
| 399 | 438 | * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests |
|---|
| 400 | 439 | * |
|---|
| 401 | 440 | * @destroy_list - request list (using wb_this_page) terminated by @old_head |
|---|
| .. | .. |
|---|
| 452 | 491 | } |
|---|
| 453 | 492 | |
|---|
| 454 | 493 | /* |
|---|
| 455 | | - * nfs_lock_and_join_requests - join all subreqs to the head req and return |
|---|
| 456 | | - * a locked reference, cancelling any pending |
|---|
| 457 | | - * operations for this page. |
|---|
| 494 | + * nfs_join_page_group - destroy subrequests of the head req |
|---|
| 495 | + * @head: the page used to lookup the "page group" of nfs_page structures |
|---|
| 496 | + * @inode: Inode to which the request belongs. |
|---|
| 458 | 497 | * |
|---|
| 459 | | - * @page - the page used to lookup the "page group" of nfs_page structures |
|---|
| 498 | + * This function joins all sub requests to the head request by first |
|---|
| 499 | + * locking all requests in the group, cancelling any pending operations |
|---|
| 500 | + * and finally updating the head request to cover the whole range covered by |
|---|
| 501 | + * the (former) group. All subrequests are removed from any write or commit |
|---|
| 502 | + * lists, unlinked from the group and destroyed. |
|---|
| 503 | + */ |
|---|
| 504 | +void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, |
|---|
| 505 | + struct inode *inode) |
|---|
| 506 | +{ |
|---|
| 507 | + struct nfs_page *subreq; |
|---|
| 508 | + struct nfs_page *destroy_list = NULL; |
|---|
| 509 | + unsigned int pgbase, off, bytes; |
|---|
| 510 | + |
|---|
| 511 | + pgbase = head->wb_pgbase; |
|---|
| 512 | + bytes = head->wb_bytes; |
|---|
| 513 | + off = head->wb_offset; |
|---|
| 514 | + for (subreq = head->wb_this_page; subreq != head; |
|---|
| 515 | + subreq = subreq->wb_this_page) { |
|---|
| 516 | + /* Subrequests should always form a contiguous range */ |
|---|
| 517 | + if (pgbase > subreq->wb_pgbase) { |
|---|
| 518 | + off -= pgbase - subreq->wb_pgbase; |
|---|
| 519 | + bytes += pgbase - subreq->wb_pgbase; |
|---|
| 520 | + pgbase = subreq->wb_pgbase; |
|---|
| 521 | + } |
|---|
| 522 | + bytes = max(subreq->wb_pgbase + subreq->wb_bytes |
|---|
| 523 | + - pgbase, bytes); |
|---|
| 524 | + } |
|---|
| 525 | + |
|---|
| 526 | + /* Set the head request's range to cover the former page group */ |
|---|
| 527 | + head->wb_pgbase = pgbase; |
|---|
| 528 | + head->wb_bytes = bytes; |
|---|
| 529 | + head->wb_offset = off; |
|---|
| 530 | + |
|---|
| 531 | + /* Now that all requests are locked, make sure they aren't on any list. |
|---|
| 532 | + * Commit list removal accounting is done after locks are dropped */ |
|---|
| 533 | + subreq = head; |
|---|
| 534 | + do { |
|---|
| 535 | + nfs_clear_request_commit(cinfo, subreq); |
|---|
| 536 | + subreq = subreq->wb_this_page; |
|---|
| 537 | + } while (subreq != head); |
|---|
| 538 | + |
|---|
| 539 | + /* unlink subrequests from head, destroy them later */ |
|---|
| 540 | + if (head->wb_this_page != head) { |
|---|
| 541 | + /* destroy list will be terminated by head */ |
|---|
| 542 | + destroy_list = head->wb_this_page; |
|---|
| 543 | + head->wb_this_page = head; |
|---|
| 544 | + } |
|---|
| 545 | + |
|---|
| 546 | + nfs_destroy_unlinked_subrequests(destroy_list, head, inode); |
|---|
| 547 | +} |
|---|
| 548 | + |
|---|
| 549 | +/* |
|---|
| 550 | + * nfs_lock_and_join_requests - join all subreqs to the head req |
|---|
| 551 | + * @page: the page used to lookup the "page group" of nfs_page structures |
|---|
| 460 | 552 | * |
|---|
| 461 | 553 | * This function joins all sub requests to the head request by first |
|---|
| 462 | 554 | * locking all requests in the group, cancelling any pending operations |
|---|
| .. | .. |
|---|
| 473 | 565 | nfs_lock_and_join_requests(struct page *page) |
|---|
| 474 | 566 | { |
|---|
| 475 | 567 | struct inode *inode = page_file_mapping(page)->host; |
|---|
| 476 | | - struct nfs_page *head, *subreq; |
|---|
| 477 | | - struct nfs_page *destroy_list = NULL; |
|---|
| 478 | | - unsigned int total_bytes; |
|---|
| 568 | + struct nfs_page *head; |
|---|
| 569 | + struct nfs_commit_info cinfo; |
|---|
| 479 | 570 | int ret; |
|---|
| 480 | 571 | |
|---|
| 481 | | -try_again: |
|---|
| 572 | + nfs_init_cinfo_from_inode(&cinfo, inode); |
|---|
| 482 | 573 | /* |
|---|
| 483 | 574 | * A reference is taken only on the head request which acts as a |
|---|
| 484 | 575 | * reference to the whole page group - the group will not be destroyed |
|---|
| 485 | 576 | * until the head reference is released. |
|---|
| 486 | 577 | */ |
|---|
| 487 | | - head = nfs_page_find_head_request(page); |
|---|
| 488 | | - if (!head) |
|---|
| 489 | | - return NULL; |
|---|
| 490 | | - |
|---|
| 491 | | - /* lock the page head first in order to avoid an ABBA inefficiency */ |
|---|
| 492 | | - if (!nfs_lock_request(head)) { |
|---|
| 493 | | - ret = nfs_wait_on_request(head); |
|---|
| 494 | | - nfs_release_request(head); |
|---|
| 495 | | - if (ret < 0) |
|---|
| 496 | | - return ERR_PTR(ret); |
|---|
| 497 | | - goto try_again; |
|---|
| 498 | | - } |
|---|
| 499 | | - |
|---|
| 500 | | - /* Ensure that nobody removed the request before we locked it */ |
|---|
| 501 | | - if (head != nfs_page_private_request(page) && !PageSwapCache(page)) { |
|---|
| 502 | | - nfs_unlock_and_release_request(head); |
|---|
| 503 | | - goto try_again; |
|---|
| 504 | | - } |
|---|
| 505 | | - |
|---|
| 506 | | - ret = nfs_page_group_lock(head); |
|---|
| 507 | | - if (ret < 0) |
|---|
| 508 | | - goto release_request; |
|---|
| 578 | + head = nfs_find_and_lock_page_request(page); |
|---|
| 579 | + if (IS_ERR_OR_NULL(head)) |
|---|
| 580 | + return head; |
|---|
| 509 | 581 | |
|---|
| 510 | 582 | /* lock each request in the page group */ |
|---|
| 511 | | - total_bytes = head->wb_bytes; |
|---|
| 512 | | - for (subreq = head->wb_this_page; subreq != head; |
|---|
| 513 | | - subreq = subreq->wb_this_page) { |
|---|
| 514 | | - |
|---|
| 515 | | - if (!kref_get_unless_zero(&subreq->wb_kref)) { |
|---|
| 516 | | - if (subreq->wb_offset == head->wb_offset + total_bytes) |
|---|
| 517 | | - total_bytes += subreq->wb_bytes; |
|---|
| 518 | | - continue; |
|---|
| 519 | | - } |
|---|
| 520 | | - |
|---|
| 521 | | - while (!nfs_lock_request(subreq)) { |
|---|
| 522 | | - /* |
|---|
| 523 | | - * Unlock page to allow nfs_page_group_sync_on_bit() |
|---|
| 524 | | - * to succeed |
|---|
| 525 | | - */ |
|---|
| 526 | | - nfs_page_group_unlock(head); |
|---|
| 527 | | - ret = nfs_wait_on_request(subreq); |
|---|
| 528 | | - if (!ret) |
|---|
| 529 | | - ret = nfs_page_group_lock(head); |
|---|
| 530 | | - if (ret < 0) { |
|---|
| 531 | | - nfs_unroll_locks(inode, head, subreq); |
|---|
| 532 | | - nfs_release_request(subreq); |
|---|
| 533 | | - goto release_request; |
|---|
| 534 | | - } |
|---|
| 535 | | - } |
|---|
| 536 | | - /* |
|---|
| 537 | | - * Subrequests are always contiguous, non overlapping |
|---|
| 538 | | - * and in order - but may be repeated (mirrored writes). |
|---|
| 539 | | - */ |
|---|
| 540 | | - if (subreq->wb_offset == (head->wb_offset + total_bytes)) { |
|---|
| 541 | | - /* keep track of how many bytes this group covers */ |
|---|
| 542 | | - total_bytes += subreq->wb_bytes; |
|---|
| 543 | | - } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || |
|---|
| 544 | | - ((subreq->wb_offset + subreq->wb_bytes) > |
|---|
| 545 | | - (head->wb_offset + total_bytes)))) { |
|---|
| 546 | | - nfs_page_group_unlock(head); |
|---|
| 547 | | - nfs_unroll_locks(inode, head, subreq); |
|---|
| 548 | | - nfs_unlock_and_release_request(subreq); |
|---|
| 549 | | - ret = -EIO; |
|---|
| 550 | | - goto release_request; |
|---|
| 551 | | - } |
|---|
| 552 | | - } |
|---|
| 553 | | - |
|---|
| 554 | | - /* Now that all requests are locked, make sure they aren't on any list. |
|---|
| 555 | | - * Commit list removal accounting is done after locks are dropped */ |
|---|
| 556 | | - subreq = head; |
|---|
| 557 | | - do { |
|---|
| 558 | | - nfs_clear_request_commit(subreq); |
|---|
| 559 | | - subreq = subreq->wb_this_page; |
|---|
| 560 | | - } while (subreq != head); |
|---|
| 561 | | - |
|---|
| 562 | | - /* unlink subrequests from head, destroy them later */ |
|---|
| 563 | | - if (head->wb_this_page != head) { |
|---|
| 564 | | - /* destroy list will be terminated by head */ |
|---|
| 565 | | - destroy_list = head->wb_this_page; |
|---|
| 566 | | - head->wb_this_page = head; |
|---|
| 567 | | - |
|---|
| 568 | | - /* change head request to cover whole range that |
|---|
| 569 | | - * the former page group covered */ |
|---|
| 570 | | - head->wb_bytes = total_bytes; |
|---|
| 571 | | - } |
|---|
| 572 | | - |
|---|
| 573 | | - /* Postpone destruction of this request */ |
|---|
| 574 | | - if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) { |
|---|
| 575 | | - set_bit(PG_INODE_REF, &head->wb_flags); |
|---|
| 576 | | - kref_get(&head->wb_kref); |
|---|
| 577 | | - atomic_long_inc(&NFS_I(inode)->nrequests); |
|---|
| 578 | | - } |
|---|
| 579 | | - |
|---|
| 580 | | - nfs_page_group_unlock(head); |
|---|
| 581 | | - |
|---|
| 582 | | - nfs_destroy_unlinked_subrequests(destroy_list, head, inode); |
|---|
| 583 | | - |
|---|
| 584 | | - /* Did we lose a race with nfs_inode_remove_request()? */ |
|---|
| 585 | | - if (!(PagePrivate(page) || PageSwapCache(page))) { |
|---|
| 583 | + ret = nfs_page_group_lock_subrequests(head); |
|---|
| 584 | + if (ret < 0) { |
|---|
| 586 | 585 | nfs_unlock_and_release_request(head); |
|---|
| 587 | | - return NULL; |
|---|
| 586 | + return ERR_PTR(ret); |
|---|
| 588 | 587 | } |
|---|
| 589 | 588 | |
|---|
| 590 | | - /* still holds ref on head from nfs_page_find_head_request |
|---|
| 591 | | - * and still has lock on head from lock loop */ |
|---|
| 589 | + nfs_join_page_group(head, &cinfo, inode); |
|---|
| 590 | + |
|---|
| 592 | 591 | return head; |
|---|
| 593 | | - |
|---|
| 594 | | -release_request: |
|---|
| 595 | | - nfs_unlock_and_release_request(head); |
|---|
| 596 | | - return ERR_PTR(ret); |
|---|
| 597 | 592 | } |
|---|
| 598 | 593 | |
|---|
| 599 | | -static void nfs_write_error_remove_page(struct nfs_page *req) |
|---|
| 594 | +static void nfs_write_error(struct nfs_page *req, int error) |
|---|
| 600 | 595 | { |
|---|
| 596 | + trace_nfs_write_error(req, error); |
|---|
| 597 | + nfs_mapping_set_error(req->wb_page, error); |
|---|
| 598 | + nfs_inode_remove_request(req); |
|---|
| 601 | 599 | nfs_end_page_writeback(req); |
|---|
| 602 | | - generic_error_remove_page(page_file_mapping(req->wb_page), |
|---|
| 603 | | - req->wb_page); |
|---|
| 604 | 600 | nfs_release_request(req); |
|---|
| 605 | | -} |
|---|
| 606 | | - |
|---|
| 607 | | -static bool |
|---|
| 608 | | -nfs_error_is_fatal_on_server(int err) |
|---|
| 609 | | -{ |
|---|
| 610 | | - switch (err) { |
|---|
| 611 | | - case 0: |
|---|
| 612 | | - case -ERESTARTSYS: |
|---|
| 613 | | - case -EINTR: |
|---|
| 614 | | - return false; |
|---|
| 615 | | - } |
|---|
| 616 | | - return nfs_error_is_fatal(err); |
|---|
| 617 | 601 | } |
|---|
| 618 | 602 | |
|---|
| 619 | 603 | /* |
|---|
| .. | .. |
|---|
| 636 | 620 | nfs_set_page_writeback(page); |
|---|
| 637 | 621 | WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); |
|---|
| 638 | 622 | |
|---|
| 639 | | - ret = req->wb_context->error; |
|---|
| 640 | 623 | /* If there is a fatal error that covers this write, just exit */ |
|---|
| 624 | + ret = pgio->pg_error; |
|---|
| 641 | 625 | if (nfs_error_is_fatal_on_server(ret)) |
|---|
| 642 | 626 | goto out_launder; |
|---|
| 643 | 627 | |
|---|
| .. | .. |
|---|
| 648 | 632 | * Remove the problematic req upon fatal errors on the server |
|---|
| 649 | 633 | */ |
|---|
| 650 | 634 | if (nfs_error_is_fatal(ret)) { |
|---|
| 651 | | - nfs_context_set_write_error(req->wb_context, ret); |
|---|
| 652 | 635 | if (nfs_error_is_fatal_on_server(ret)) |
|---|
| 653 | 636 | goto out_launder; |
|---|
| 654 | 637 | } else |
|---|
| 655 | 638 | ret = -EAGAIN; |
|---|
| 656 | 639 | nfs_redirty_request(req); |
|---|
| 640 | + pgio->pg_error = 0; |
|---|
| 657 | 641 | } else |
|---|
| 658 | 642 | nfs_add_stats(page_file_mapping(page)->host, |
|---|
| 659 | 643 | NFSIOS_WRITEPAGES, 1); |
|---|
| 660 | 644 | out: |
|---|
| 661 | 645 | return ret; |
|---|
| 662 | 646 | out_launder: |
|---|
| 663 | | - nfs_write_error_remove_page(req); |
|---|
| 647 | + nfs_write_error(req, ret); |
|---|
| 664 | 648 | return 0; |
|---|
| 665 | 649 | } |
|---|
| 666 | 650 | |
|---|
| .. | .. |
|---|
| 673 | 657 | ret = nfs_page_async_flush(pgio, page); |
|---|
| 674 | 658 | if (ret == -EAGAIN) { |
|---|
| 675 | 659 | redirty_page_for_writepage(wbc, page); |
|---|
| 676 | | - ret = 0; |
|---|
| 660 | + ret = AOP_WRITEPAGE_ACTIVATE; |
|---|
| 677 | 661 | } |
|---|
| 678 | 662 | return ret; |
|---|
| 679 | 663 | } |
|---|
| .. | .. |
|---|
| 692 | 676 | nfs_pageio_init_write(&pgio, inode, 0, |
|---|
| 693 | 677 | false, &nfs_async_write_completion_ops); |
|---|
| 694 | 678 | err = nfs_do_writepage(page, wbc, &pgio); |
|---|
| 679 | + pgio.pg_error = 0; |
|---|
| 695 | 680 | nfs_pageio_complete(&pgio); |
|---|
| 696 | | - if (err < 0) |
|---|
| 697 | | - return err; |
|---|
| 698 | | - if (pgio.pg_error < 0) |
|---|
| 699 | | - return pgio.pg_error; |
|---|
| 700 | | - return 0; |
|---|
| 681 | + return err; |
|---|
| 701 | 682 | } |
|---|
| 702 | 683 | |
|---|
| 703 | 684 | int nfs_writepage(struct page *page, struct writeback_control *wbc) |
|---|
| .. | .. |
|---|
| 705 | 686 | int ret; |
|---|
| 706 | 687 | |
|---|
| 707 | 688 | ret = nfs_writepage_locked(page, wbc); |
|---|
| 708 | | - unlock_page(page); |
|---|
| 689 | + if (ret != AOP_WRITEPAGE_ACTIVATE) |
|---|
| 690 | + unlock_page(page); |
|---|
| 709 | 691 | return ret; |
|---|
| 710 | 692 | } |
|---|
| 711 | 693 | |
|---|
| .. | .. |
|---|
| 714 | 696 | int ret; |
|---|
| 715 | 697 | |
|---|
| 716 | 698 | ret = nfs_do_writepage(page, wbc, data); |
|---|
| 717 | | - unlock_page(page); |
|---|
| 699 | + if (ret != AOP_WRITEPAGE_ACTIVATE) |
|---|
| 700 | + unlock_page(page); |
|---|
| 718 | 701 | return ret; |
|---|
| 719 | 702 | } |
|---|
| 720 | 703 | |
|---|
| .. | .. |
|---|
| 727 | 710 | { |
|---|
| 728 | 711 | struct inode *inode = mapping->host; |
|---|
| 729 | 712 | struct nfs_pageio_descriptor pgio; |
|---|
| 730 | | - struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS); |
|---|
| 713 | + struct nfs_io_completion *ioc; |
|---|
| 731 | 714 | int err; |
|---|
| 732 | 715 | |
|---|
| 733 | 716 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); |
|---|
| 734 | 717 | |
|---|
| 718 | + ioc = nfs_io_completion_alloc(GFP_KERNEL); |
|---|
| 735 | 719 | if (ioc) |
|---|
| 736 | 720 | nfs_io_completion_init(ioc, nfs_io_completion_commit, inode); |
|---|
| 737 | 721 | |
|---|
| .. | .. |
|---|
| 739 | 723 | &nfs_async_write_completion_ops); |
|---|
| 740 | 724 | pgio.pg_io_completion = ioc; |
|---|
| 741 | 725 | err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); |
|---|
| 726 | + pgio.pg_error = 0; |
|---|
| 742 | 727 | nfs_pageio_complete(&pgio); |
|---|
| 743 | 728 | nfs_io_completion_put(ioc); |
|---|
| 744 | 729 | |
|---|
| 745 | | - if (err < 0) |
|---|
| 746 | | - goto out_err; |
|---|
| 747 | | - err = pgio.pg_error; |
|---|
| 748 | 730 | if (err < 0) |
|---|
| 749 | 731 | goto out_err; |
|---|
| 750 | 732 | return 0; |
|---|
| .. | .. |
|---|
| 881 | 863 | /** |
|---|
| 882 | 864 | * nfs_request_add_commit_list - add request to a commit list |
|---|
| 883 | 865 | * @req: pointer to a struct nfs_page |
|---|
| 884 | | - * @dst: commit list head |
|---|
| 885 | 866 | * @cinfo: holds list lock and accounting info |
|---|
| 886 | 867 | * |
|---|
| 887 | 868 | * This sets the PG_CLEAN bit, updates the cinfo count of |
|---|
| .. | .. |
|---|
| 960 | 941 | static void |
|---|
| 961 | 942 | nfs_clear_page_commit(struct page *page) |
|---|
| 962 | 943 | { |
|---|
| 963 | | - dec_node_page_state(page, NR_UNSTABLE_NFS); |
|---|
| 944 | + dec_node_page_state(page, NR_WRITEBACK); |
|---|
| 964 | 945 | dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb, |
|---|
| 965 | | - WB_RECLAIMABLE); |
|---|
| 946 | + WB_WRITEBACK); |
|---|
| 966 | 947 | } |
|---|
| 967 | 948 | |
|---|
| 968 | 949 | /* Called holding the request lock on @req */ |
|---|
| 969 | | -static void |
|---|
| 970 | | -nfs_clear_request_commit(struct nfs_page *req) |
|---|
| 950 | +static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, |
|---|
| 951 | + struct nfs_page *req) |
|---|
| 971 | 952 | { |
|---|
| 972 | 953 | if (test_bit(PG_CLEAN, &req->wb_flags)) { |
|---|
| 973 | | - struct inode *inode = d_inode(req->wb_context->dentry); |
|---|
| 974 | | - struct nfs_commit_info cinfo; |
|---|
| 954 | + struct nfs_open_context *ctx = nfs_req_openctx(req); |
|---|
| 955 | + struct inode *inode = d_inode(ctx->dentry); |
|---|
| 975 | 956 | |
|---|
| 976 | | - nfs_init_cinfo_from_inode(&cinfo, inode); |
|---|
| 977 | 957 | mutex_lock(&NFS_I(inode)->commit_mutex); |
|---|
| 978 | | - if (!pnfs_clear_request_commit(req, &cinfo)) { |
|---|
| 979 | | - nfs_request_remove_commit_list(req, &cinfo); |
|---|
| 958 | + if (!pnfs_clear_request_commit(req, cinfo)) { |
|---|
| 959 | + nfs_request_remove_commit_list(req, cinfo); |
|---|
| 980 | 960 | } |
|---|
| 981 | 961 | mutex_unlock(&NFS_I(inode)->commit_mutex); |
|---|
| 982 | 962 | nfs_clear_page_commit(req->wb_page); |
|---|
| .. | .. |
|---|
| 1010 | 990 | nfs_list_remove_request(req); |
|---|
| 1011 | 991 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && |
|---|
| 1012 | 992 | (hdr->good_bytes < bytes)) { |
|---|
| 1013 | | - nfs_set_pageerror(page_file_mapping(req->wb_page)); |
|---|
| 1014 | | - nfs_context_set_write_error(req->wb_context, hdr->error); |
|---|
| 993 | + trace_nfs_comp_error(req, hdr->error); |
|---|
| 994 | + nfs_mapping_set_error(req->wb_page, hdr->error); |
|---|
| 1015 | 995 | goto remove_req; |
|---|
| 1016 | 996 | } |
|---|
| 1017 | 997 | if (nfs_write_need_commit(hdr)) { |
|---|
| 998 | + /* Reset wb_nio, since the write was successful. */ |
|---|
| 999 | + req->wb_nio = 0; |
|---|
| 1018 | 1000 | memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); |
|---|
| 1019 | 1001 | nfs_mark_request_commit(req, hdr->lseg, &cinfo, |
|---|
| 1020 | 1002 | hdr->pgio_mirror_idx); |
|---|
| .. | .. |
|---|
| 1134 | 1116 | req->wb_bytes = end - req->wb_offset; |
|---|
| 1135 | 1117 | else |
|---|
| 1136 | 1118 | req->wb_bytes = rqend - req->wb_offset; |
|---|
| 1119 | + req->wb_nio = 0; |
|---|
| 1137 | 1120 | return req; |
|---|
| 1138 | 1121 | out_flushme: |
|---|
| 1139 | 1122 | /* |
|---|
| .. | .. |
|---|
| 1163 | 1146 | req = nfs_try_to_update_request(inode, page, offset, bytes); |
|---|
| 1164 | 1147 | if (req != NULL) |
|---|
| 1165 | 1148 | goto out; |
|---|
| 1166 | | - req = nfs_create_request(ctx, page, NULL, offset, bytes); |
|---|
| 1149 | + req = nfs_create_request(ctx, page, offset, bytes); |
|---|
| 1167 | 1150 | if (IS_ERR(req)) |
|---|
| 1168 | 1151 | goto out; |
|---|
| 1169 | 1152 | nfs_inode_add_request(inode, req); |
|---|
| .. | .. |
|---|
| 1208 | 1191 | return 0; |
|---|
| 1209 | 1192 | l_ctx = req->wb_lock_context; |
|---|
| 1210 | 1193 | do_flush = req->wb_page != page || |
|---|
| 1211 | | - !nfs_match_open_context(req->wb_context, ctx); |
|---|
| 1194 | + !nfs_match_open_context(nfs_req_openctx(req), ctx); |
|---|
| 1212 | 1195 | if (l_ctx && flctx && |
|---|
| 1213 | 1196 | !(list_empty_careful(&flctx->flc_posix) && |
|---|
| 1214 | 1197 | list_empty_careful(&flctx->flc_flock))) { |
|---|
| .. | .. |
|---|
| 1236 | 1219 | nfs_key_timeout_notify(struct file *filp, struct inode *inode) |
|---|
| 1237 | 1220 | { |
|---|
| 1238 | 1221 | struct nfs_open_context *ctx = nfs_file_open_context(filp); |
|---|
| 1239 | | - struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; |
|---|
| 1240 | 1222 | |
|---|
| 1241 | | - return rpcauth_key_timeout_notify(auth, ctx->cred); |
|---|
| 1223 | + if (nfs_ctx_key_to_expire(ctx, inode) && |
|---|
| 1224 | + !ctx->ll_cred) |
|---|
| 1225 | + /* Already expired! */ |
|---|
| 1226 | + return -EACCES; |
|---|
| 1227 | + return 0; |
|---|
| 1242 | 1228 | } |
|---|
| 1243 | 1229 | |
|---|
| 1244 | 1230 | /* |
|---|
| .. | .. |
|---|
| 1247 | 1233 | bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode) |
|---|
| 1248 | 1234 | { |
|---|
| 1249 | 1235 | struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; |
|---|
| 1236 | + struct rpc_cred *cred = ctx->ll_cred; |
|---|
| 1237 | + struct auth_cred acred = { |
|---|
| 1238 | + .cred = ctx->cred, |
|---|
| 1239 | + }; |
|---|
| 1250 | 1240 | |
|---|
| 1251 | | - return rpcauth_cred_key_to_expire(auth, ctx->cred); |
|---|
| 1241 | + if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) { |
|---|
| 1242 | + put_rpccred(cred); |
|---|
| 1243 | + ctx->ll_cred = NULL; |
|---|
| 1244 | + cred = NULL; |
|---|
| 1245 | + } |
|---|
| 1246 | + if (!cred) |
|---|
| 1247 | + cred = auth->au_ops->lookup_cred(auth, &acred, 0); |
|---|
| 1248 | + if (!cred || IS_ERR(cred)) |
|---|
| 1249 | + return true; |
|---|
| 1250 | + ctx->ll_cred = cred; |
|---|
| 1251 | + return !!(cred->cr_ops->crkey_timeout && |
|---|
| 1252 | + cred->cr_ops->crkey_timeout(cred)); |
|---|
| 1252 | 1253 | } |
|---|
| 1253 | 1254 | |
|---|
| 1254 | 1255 | /* |
|---|
| .. | .. |
|---|
| 1380 | 1381 | |
|---|
| 1381 | 1382 | task_setup_data->priority = priority; |
|---|
| 1382 | 1383 | rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); |
|---|
| 1383 | | - trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes, |
|---|
| 1384 | | - hdr->args.stable); |
|---|
| 1384 | + trace_nfs_initiate_write(hdr); |
|---|
| 1385 | 1385 | } |
|---|
| 1386 | 1386 | |
|---|
| 1387 | 1387 | /* If a nfs_flush_* function fails, it should remove reqs from @head and |
|---|
| .. | .. |
|---|
| 1390 | 1390 | */ |
|---|
| 1391 | 1391 | static void nfs_redirty_request(struct nfs_page *req) |
|---|
| 1392 | 1392 | { |
|---|
| 1393 | + /* Bump the transmission count */ |
|---|
| 1394 | + req->wb_nio++; |
|---|
| 1393 | 1395 | nfs_mark_request_dirty(req); |
|---|
| 1394 | | - set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); |
|---|
| 1396 | + set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags); |
|---|
| 1395 | 1397 | nfs_end_page_writeback(req); |
|---|
| 1396 | 1398 | nfs_release_request(req); |
|---|
| 1397 | 1399 | } |
|---|
| .. | .. |
|---|
| 1403 | 1405 | while (!list_empty(head)) { |
|---|
| 1404 | 1406 | req = nfs_list_entry(head->next); |
|---|
| 1405 | 1407 | nfs_list_remove_request(req); |
|---|
| 1406 | | - if (nfs_error_is_fatal(error)) { |
|---|
| 1407 | | - nfs_context_set_write_error(req->wb_context, error); |
|---|
| 1408 | | - if (nfs_error_is_fatal_on_server(error)) { |
|---|
| 1409 | | - nfs_write_error_remove_page(req); |
|---|
| 1410 | | - continue; |
|---|
| 1411 | | - } |
|---|
| 1412 | | - } |
|---|
| 1413 | | - nfs_redirty_request(req); |
|---|
| 1408 | + if (nfs_error_is_fatal_on_server(error)) |
|---|
| 1409 | + nfs_write_error(req, error); |
|---|
| 1410 | + else |
|---|
| 1411 | + nfs_redirty_request(req); |
|---|
| 1414 | 1412 | } |
|---|
| 1415 | 1413 | } |
|---|
| 1416 | 1414 | |
|---|
| .. | .. |
|---|
| 1547 | 1545 | return status; |
|---|
| 1548 | 1546 | |
|---|
| 1549 | 1547 | nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); |
|---|
| 1550 | | - trace_nfs_writeback_done(inode, task->tk_status, |
|---|
| 1551 | | - hdr->args.offset, hdr->res.verf); |
|---|
| 1548 | + trace_nfs_writeback_done(task, hdr); |
|---|
| 1552 | 1549 | |
|---|
| 1553 | 1550 | if (hdr->res.verf->committed < hdr->args.stable && |
|---|
| 1554 | 1551 | task->tk_status >= 0) { |
|---|
| .. | .. |
|---|
| 1628 | 1625 | */ |
|---|
| 1629 | 1626 | argp->stable = NFS_FILE_SYNC; |
|---|
| 1630 | 1627 | } |
|---|
| 1628 | + resp->count = 0; |
|---|
| 1629 | + resp->verf->committed = 0; |
|---|
| 1631 | 1630 | rpc_restart_call_prepare(task); |
|---|
| 1632 | 1631 | } |
|---|
| 1633 | 1632 | } |
|---|
| .. | .. |
|---|
| 1643 | 1642 | atomic_inc(&cinfo->rpcs_out); |
|---|
| 1644 | 1643 | } |
|---|
| 1645 | 1644 | |
|---|
| 1646 | | -static void nfs_commit_end(struct nfs_mds_commit_info *cinfo) |
|---|
| 1645 | +bool nfs_commit_end(struct nfs_mds_commit_info *cinfo) |
|---|
| 1647 | 1646 | { |
|---|
| 1648 | | - if (atomic_dec_and_test(&cinfo->rpcs_out)) |
|---|
| 1647 | + if (atomic_dec_and_test(&cinfo->rpcs_out)) { |
|---|
| 1649 | 1648 | wake_up_var(&cinfo->rpcs_out); |
|---|
| 1649 | + return true; |
|---|
| 1650 | + } |
|---|
| 1651 | + return false; |
|---|
| 1650 | 1652 | } |
|---|
| 1651 | 1653 | |
|---|
| 1652 | 1654 | void nfs_commitdata_release(struct nfs_commit_data *data) |
|---|
| .. | .. |
|---|
| 1714 | 1716 | struct pnfs_layout_segment *lseg, |
|---|
| 1715 | 1717 | struct nfs_commit_info *cinfo) |
|---|
| 1716 | 1718 | { |
|---|
| 1717 | | - struct nfs_page *first = nfs_list_entry(head->next); |
|---|
| 1718 | | - struct inode *inode = d_inode(first->wb_context->dentry); |
|---|
| 1719 | + struct nfs_page *first; |
|---|
| 1720 | + struct nfs_open_context *ctx; |
|---|
| 1721 | + struct inode *inode; |
|---|
| 1719 | 1722 | |
|---|
| 1720 | 1723 | /* Set up the RPC argument and reply structs |
|---|
| 1721 | 1724 | * NB: take care not to mess about with data->commit et al. */ |
|---|
| 1722 | 1725 | |
|---|
| 1723 | | - list_splice_init(head, &data->pages); |
|---|
| 1726 | + if (head) |
|---|
| 1727 | + list_splice_init(head, &data->pages); |
|---|
| 1728 | + |
|---|
| 1729 | + first = nfs_list_entry(data->pages.next); |
|---|
| 1730 | + ctx = nfs_req_openctx(first); |
|---|
| 1731 | + inode = d_inode(ctx->dentry); |
|---|
| 1724 | 1732 | |
|---|
| 1725 | 1733 | data->inode = inode; |
|---|
| 1726 | | - data->cred = first->wb_context->cred; |
|---|
| 1734 | + data->cred = ctx->cred; |
|---|
| 1727 | 1735 | data->lseg = lseg; /* reference transferred */ |
|---|
| 1728 | 1736 | /* only set lwb for pnfs commit */ |
|---|
| 1729 | 1737 | if (lseg) |
|---|
| .. | .. |
|---|
| 1736 | 1744 | /* Note: we always request a commit of the entire inode */ |
|---|
| 1737 | 1745 | data->args.offset = 0; |
|---|
| 1738 | 1746 | data->args.count = 0; |
|---|
| 1739 | | - data->context = get_nfs_open_context(first->wb_context); |
|---|
| 1747 | + data->context = get_nfs_open_context(ctx); |
|---|
| 1740 | 1748 | data->res.fattr = &data->fattr; |
|---|
| 1741 | 1749 | data->res.verf = &data->verf; |
|---|
| 1742 | 1750 | nfs_fattr_init(&data->fattr); |
|---|
| 1751 | + nfs_commit_begin(cinfo->mds); |
|---|
| 1743 | 1752 | } |
|---|
| 1744 | 1753 | EXPORT_SYMBOL_GPL(nfs_init_commit); |
|---|
| 1745 | 1754 | |
|---|
| .. | .. |
|---|
| 1781 | 1790 | if (list_empty(head)) |
|---|
| 1782 | 1791 | return 0; |
|---|
| 1783 | 1792 | |
|---|
| 1784 | | - data = nfs_commitdata_alloc(true); |
|---|
| 1793 | + data = nfs_commitdata_alloc(); |
|---|
| 1794 | + if (!data) { |
|---|
| 1795 | + nfs_retry_commit(head, NULL, cinfo, -1); |
|---|
| 1796 | + return -ENOMEM; |
|---|
| 1797 | + } |
|---|
| 1785 | 1798 | |
|---|
| 1786 | 1799 | /* Set up the argument struct */ |
|---|
| 1787 | 1800 | nfs_init_commit(data, head, NULL, cinfo); |
|---|
| 1788 | | - atomic_inc(&cinfo->mds->rpcs_out); |
|---|
| 1789 | 1801 | return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), |
|---|
| 1790 | | - data->mds_ops, how, 0); |
|---|
| 1802 | + data->mds_ops, how, RPC_TASK_CRED_NOREF); |
|---|
| 1791 | 1803 | } |
|---|
| 1792 | 1804 | |
|---|
| 1793 | 1805 | /* |
|---|
| .. | .. |
|---|
| 1802 | 1814 | |
|---|
| 1803 | 1815 | /* Call the NFS version-specific code */ |
|---|
| 1804 | 1816 | NFS_PROTO(data->inode)->commit_done(task, data); |
|---|
| 1805 | | - trace_nfs_commit_done(data); |
|---|
| 1817 | + trace_nfs_commit_done(task, data); |
|---|
| 1806 | 1818 | } |
|---|
| 1807 | 1819 | |
|---|
| 1808 | 1820 | static void nfs_commit_release_pages(struct nfs_commit_data *data) |
|---|
| .. | .. |
|---|
| 1820 | 1832 | nfs_clear_page_commit(req->wb_page); |
|---|
| 1821 | 1833 | |
|---|
| 1822 | 1834 | dprintk("NFS: commit (%s/%llu %d@%lld)", |
|---|
| 1823 | | - req->wb_context->dentry->d_sb->s_id, |
|---|
| 1824 | | - (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)), |
|---|
| 1835 | + nfs_req_openctx(req)->dentry->d_sb->s_id, |
|---|
| 1836 | + (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)), |
|---|
| 1825 | 1837 | req->wb_bytes, |
|---|
| 1826 | 1838 | (long long)req_offset(req)); |
|---|
| 1827 | 1839 | if (status < 0) { |
|---|
| 1828 | | - nfs_context_set_write_error(req->wb_context, status); |
|---|
| 1829 | | - if (req->wb_page) |
|---|
| 1840 | + if (req->wb_page) { |
|---|
| 1841 | + trace_nfs_commit_error(req, status); |
|---|
| 1842 | + nfs_mapping_set_error(req->wb_page, status); |
|---|
| 1830 | 1843 | nfs_inode_remove_request(req); |
|---|
| 1844 | + } |
|---|
| 1831 | 1845 | dprintk_cont(", error = %d\n", status); |
|---|
| 1832 | 1846 | goto next; |
|---|
| 1833 | 1847 | } |
|---|
| 1834 | 1848 | |
|---|
| 1835 | 1849 | /* Okay, COMMIT succeeded, apparently. Check the verifier |
|---|
| 1836 | 1850 | * returned by the server against all stored verfs. */ |
|---|
| 1837 | | - if (verf->committed > NFS_UNSTABLE && |
|---|
| 1838 | | - !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) { |
|---|
| 1851 | + if (nfs_write_match_verf(verf, req)) { |
|---|
| 1839 | 1852 | /* We have a match */ |
|---|
| 1840 | 1853 | if (req->wb_page) |
|---|
| 1841 | 1854 | nfs_inode_remove_request(req); |
|---|
| .. | .. |
|---|
| 1845 | 1858 | /* We have a mismatch. Write the page again */ |
|---|
| 1846 | 1859 | dprintk_cont(" mismatch\n"); |
|---|
| 1847 | 1860 | nfs_mark_request_dirty(req); |
|---|
| 1848 | | - set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); |
|---|
| 1861 | + set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags); |
|---|
| 1849 | 1862 | next: |
|---|
| 1850 | 1863 | nfs_unlock_and_release_request(req); |
|---|
| 1851 | 1864 | /* Latency breaker */ |
|---|
| .. | .. |
|---|
| 2135 | 2148 | * This allows larger machines to have larger/more transfers. |
|---|
| 2136 | 2149 | * Limit the default to 256M |
|---|
| 2137 | 2150 | */ |
|---|
| 2138 | | - nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); |
|---|
| 2151 | + nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); |
|---|
| 2139 | 2152 | if (nfs_congestion_kb > 256*1024) |
|---|
| 2140 | 2153 | nfs_congestion_kb = 256*1024; |
|---|
| 2141 | 2154 | |
|---|