| .. | .. |
| | 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
| 1 | 2 | /* |
| 2 | 3 | drbd_worker.c |
| 3 | 4 | |
| .. | .. |
| 7 | 8 | Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. |
| 8 | 9 | Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. |
| 9 | 10 | |
| 10 | | - drbd is free software; you can redistribute it and/or modify |
| 11 | | - it under the terms of the GNU General Public License as published by |
| 12 | | - the Free Software Foundation; either version 2, or (at your option) |
| 13 | | - any later version. |
| 14 | | - |
| 15 | | - drbd is distributed in the hope that it will be useful, |
| 16 | | - but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 17 | | - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 18 | | - GNU General Public License for more details. |
| 19 | | - |
| 20 | | - You should have received a copy of the GNU General Public License |
| 21 | | - along with drbd; see the file COPYING. If not, write to |
| 22 | | - the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. |
| 23 | 11 | |
| 24 | 12 | */ |
| 25 | 13 | |
| .. | .. |
| 34 | 22 | #include <linux/random.h> |
| 35 | 23 | #include <linux/string.h> |
| 36 | 24 | #include <linux/scatterlist.h> |
| | 25 | +#include <linux/part_stat.h> |
| 37 | 26 | |
| 38 | 27 | #include "drbd_int.h" |
| 39 | 28 | #include "drbd_protocol.h" |
| .. | .. |
| 152 | 141 | |
| 153 | 142 | do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee); |
| 154 | 143 | |
| 155 | | - /* FIXME do we want to detach for failed REQ_DISCARD? |
| 156 | | - * ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */ |
| | 144 | + /* FIXME do we want to detach for failed REQ_OP_DISCARD? |
| | 145 | + * ((peer_req->flags & (EE_WAS_ERROR|EE_TRIM)) == EE_WAS_ERROR) */ |
| 157 | 146 | if (peer_req->flags & EE_WAS_ERROR) |
| 158 | 147 | __drbd_chk_io_error(device, DRBD_WRITE_ERROR); |
| 159 | 148 | |
| .. | .. |
| 295 | 284 | complete_master_bio(device, &m); |
| 296 | 285 | } |
| 297 | 286 | |
| 298 | | -void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest) |
| | 287 | +void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest) |
| 299 | 288 | { |
| 300 | | - AHASH_REQUEST_ON_STACK(req, tfm); |
| 301 | | - struct scatterlist sg; |
| | 289 | + SHASH_DESC_ON_STACK(desc, tfm); |
| 302 | 290 | struct page *page = peer_req->pages; |
| 303 | 291 | struct page *tmp; |
| 304 | 292 | unsigned len; |
| | 293 | + void *src; |
| 305 | 294 | |
| 306 | | - ahash_request_set_tfm(req, tfm); |
| 307 | | - ahash_request_set_callback(req, 0, NULL, NULL); |
| | 295 | + desc->tfm = tfm; |
| 308 | 296 | |
| 309 | | - sg_init_table(&sg, 1); |
| 310 | | - crypto_ahash_init(req); |
| | 297 | + crypto_shash_init(desc); |
| 311 | 298 | |
| | 299 | + src = kmap_atomic(page); |
| 312 | 300 | while ((tmp = page_chain_next(page))) { |
| 313 | 301 | /* all but the last page will be fully used */ |
| 314 | | - sg_set_page(&sg, page, PAGE_SIZE, 0); |
| 315 | | - ahash_request_set_crypt(req, &sg, NULL, sg.length); |
| 316 | | - crypto_ahash_update(req); |
| | 302 | + crypto_shash_update(desc, src, PAGE_SIZE); |
| | 303 | + kunmap_atomic(src); |
| 317 | 304 | page = tmp; |
| | 305 | + src = kmap_atomic(page); |
| 318 | 306 | } |
| 319 | 307 | /* and now the last, possibly only partially used page */ |
| 320 | 308 | len = peer_req->i.size & (PAGE_SIZE - 1); |
| 321 | | - sg_set_page(&sg, page, len ?: PAGE_SIZE, 0); |
| 322 | | - ahash_request_set_crypt(req, &sg, digest, sg.length); |
| 323 | | - crypto_ahash_finup(req); |
| 324 | | - ahash_request_zero(req); |
| | 309 | + crypto_shash_update(desc, src, len ?: PAGE_SIZE); |
| | 310 | + kunmap_atomic(src); |
| | 311 | + |
| | 312 | + crypto_shash_final(desc, digest); |
| | 313 | + shash_desc_zero(desc); |
| 325 | 314 | } |
| 326 | 315 | |
| 327 | | -void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest) |
| | 316 | +void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest) |
| 328 | 317 | { |
| 329 | | - AHASH_REQUEST_ON_STACK(req, tfm); |
| 330 | | - struct scatterlist sg; |
| | 318 | + SHASH_DESC_ON_STACK(desc, tfm); |
| 331 | 319 | struct bio_vec bvec; |
| 332 | 320 | struct bvec_iter iter; |
| 333 | 321 | |
| 334 | | - ahash_request_set_tfm(req, tfm); |
| 335 | | - ahash_request_set_callback(req, 0, NULL, NULL); |
| | 322 | + desc->tfm = tfm; |
| 336 | 323 | |
| 337 | | - sg_init_table(&sg, 1); |
| 338 | | - crypto_ahash_init(req); |
| | 324 | + crypto_shash_init(desc); |
| 339 | 325 | |
| 340 | 326 | bio_for_each_segment(bvec, bio, iter) { |
| 341 | | - sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); |
| 342 | | - ahash_request_set_crypt(req, &sg, NULL, sg.length); |
| 343 | | - crypto_ahash_update(req); |
| | 327 | + u8 *src; |
| | 328 | + |
| | 329 | + src = kmap_atomic(bvec.bv_page); |
| | 330 | + crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len); |
| | 331 | + kunmap_atomic(src); |
| | 332 | + |
| 344 | 333 | /* REQ_OP_WRITE_SAME has only one segment, |
| 345 | 334 | * checksum the payload only once. */ |
| 346 | 335 | if (bio_op(bio) == REQ_OP_WRITE_SAME) |
| 347 | 336 | break; |
| 348 | 337 | } |
| 349 | | - ahash_request_set_crypt(req, NULL, digest, 0); |
| 350 | | - crypto_ahash_final(req); |
| 351 | | - ahash_request_zero(req); |
| | 338 | + crypto_shash_final(desc, digest); |
| | 339 | + shash_desc_zero(desc); |
| 352 | 340 | } |
| 353 | 341 | |
| 354 | 342 | /* MAYBE merge common code with w_e_end_ov_req */ |
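The hunk above moves DRBD's checksum helpers from the asynchronous ahash interface to the synchronous shash interface: the scatterlist and request plumbing disappears, and because shash consumes plain virtual addresses, page contents are mapped with kmap_atomic() before each crypto_shash_update(). Below is a minimal sketch of the same init/update/final pattern on a flat buffer; the function name example_hash_buffer and the choice of "sha256" are illustrative assumptions, not part of this patch.

```c
#include <crypto/hash.h>
#include <linux/err.h>

/* Illustrative only: hash a linear buffer with the synchronous shash API.
 * The caller must provide a digest buffer of crypto_shash_digestsize(tfm)
 * bytes (32 for sha256). */
static int example_hash_buffer(const void *data, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* Descriptor lives on the stack; no request object to allocate. */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_init(desc);
		if (!err)
			err = crypto_shash_update(desc, data, len);
		if (!err)
			err = crypto_shash_final(desc, digest);
		shash_desc_zero(desc);	/* wipe intermediate hash state */
	}

	crypto_free_shash(tfm);
	return err;
}
```

For a single contiguous buffer, crypto_shash_digest() collapses the init/update/final sequence into one call; the open-coded form above matches what drbd_csum_ee()/drbd_csum_bio() need, since they feed the hash page by page.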
| .. | .. |
| 367 | 355 | if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) |
| 368 | 356 | goto out; |
| 369 | 357 | |
| 370 | | - digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm); |
| | 358 | + digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm); |
| 371 | 359 | digest = kmalloc(digest_size, GFP_NOIO); |
| 372 | 360 | if (digest) { |
| 373 | 361 | sector_t sector = peer_req->i.sector; |
| .. | .. |
| 495 | 483 | fb->values[i] += value; |
| 496 | 484 | } |
| 497 | 485 | |
| 498 | | -struct fifo_buffer *fifo_alloc(int fifo_size) |
| | 486 | +struct fifo_buffer *fifo_alloc(unsigned int fifo_size) |
| 499 | 487 | { |
| 500 | 488 | struct fifo_buffer *fb; |
| 501 | 489 | |
| 502 | | - fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO); |
| | 490 | + fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO); |
| 503 | 491 | if (!fb) |
| 504 | 492 | return NULL; |
| 505 | 493 | |
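fifo_alloc() now sizes its allocation with struct_size(), which computes the header-plus-flexible-array size with overflow checking (it saturates to SIZE_MAX, so the allocation fails instead of being silently undersized). A minimal sketch of the pattern, using a placeholder struct example_fifo shaped like drbd's fifo_buffer:

```c
#include <linux/overflow.h>
#include <linux/slab.h>

/* Placeholder structure with a trailing flexible array member. */
struct example_fifo {
	unsigned int size;
	int values[];
};

static struct example_fifo *example_fifo_alloc(unsigned int n)
{
	struct example_fifo *fb;

	/* struct_size(fb, values, n) == sizeof(*fb) + n * sizeof(fb->values[0]),
	 * but saturates instead of wrapping on overflow. */
	fb = kzalloc(struct_size(fb, values, n), GFP_NOIO);
	if (!fb)
		return NULL;

	fb->size = n;
	return fb;
}
```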
| .. | .. |
| 603 | 591 | struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; |
| 604 | 592 | unsigned long bit; |
| 605 | 593 | sector_t sector; |
| 606 | | - const sector_t capacity = drbd_get_capacity(device->this_bdev); |
| | 594 | + const sector_t capacity = get_capacity(device->vdisk); |
| 607 | 595 | int max_bio_size; |
| 608 | 596 | int number, rollback_i, size; |
| 609 | 597 | int align, requeue = 0; |
| .. | .. |
| 781 | 769 | { |
| 782 | 770 | int number, i, size; |
| 783 | 771 | sector_t sector; |
| 784 | | - const sector_t capacity = drbd_get_capacity(device->this_bdev); |
| | 772 | + const sector_t capacity = get_capacity(device->vdisk); |
| 785 | 773 | bool stop_sector_reached = false; |
| 786 | 774 | |
| 787 | 775 | if (unlikely(cancel)) |
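Both hunks above read the device size straight from the gendisk with get_capacity(device->vdisk) rather than through drbd's own drbd_get_capacity() on a cached block_device; get_capacity() reports the size in 512-byte sectors. A small illustrative helper, assuming an arbitrary gendisk pointer (example_report_size is not part of the patch):

```c
#include <linux/genhd.h>
#include <linux/kernel.h>

/* Illustrative: get_capacity() returns the gendisk size in 512-byte sectors. */
static void example_report_size(struct gendisk *disk)
{
	sector_t sectors = get_capacity(disk);

	pr_info("%s: %llu sectors (%llu MiB)\n", disk->disk_name,
		(unsigned long long)sectors,
		(unsigned long long)(sectors >> 11));
}
```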
| .. | .. |
| 1205 | 1193 | * a real fix would be much more involved, |
| 1206 | 1194 | * introducing more locking mechanisms */ |
| 1207 | 1195 | if (peer_device->connection->csums_tfm) { |
| 1208 | | - digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm); |
| | 1196 | + digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm); |
| 1209 | 1197 | D_ASSERT(device, digest_size == di->digest_size); |
| 1210 | 1198 | digest = kmalloc(digest_size, GFP_NOIO); |
| 1211 | 1199 | } |
| .. | .. |
| 1255 | 1243 | if (unlikely(cancel)) |
| 1256 | 1244 | goto out; |
| 1257 | 1245 | |
| 1258 | | - digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm); |
| | 1246 | + digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm); |
| 1259 | 1247 | digest = kmalloc(digest_size, GFP_NOIO); |
| 1260 | 1248 | if (!digest) { |
| 1261 | 1249 | err = 1; /* terminate the connection in case the allocation failed */ |
| .. | .. |
| 1327 | 1315 | di = peer_req->digest; |
| 1328 | 1316 | |
| 1329 | 1317 | if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { |
| 1330 | | - digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm); |
| | 1318 | + digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm); |
| 1331 | 1319 | digest = kmalloc(digest_size, GFP_NOIO); |
| 1332 | 1320 | if (digest) { |
| 1333 | 1321 | drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); |
| .. | .. |
| 1537 | 1525 | |
| 1538 | 1526 | drbd_req_make_private_bio(req, req->master_bio); |
| 1539 | 1527 | bio_set_dev(req->private_bio, device->ldev->backing_bdev); |
| 1540 | | - generic_make_request(req->private_bio); |
| | 1528 | + submit_bio_noacct(req->private_bio); |
| 1541 | 1529 | |
| 1542 | 1530 | return 0; |
| 1543 | 1531 | } |
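generic_make_request() was renamed to submit_bio_noacct() in the block layer; it submits a bio without charging top-level IO accounting a second time, which is what a stacking driver wants when it redirects an already-accounted bio to its backing device. A minimal sketch of that redirect pattern (example_redirect_bio is a placeholder name, not drbd code):

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative: point an already-built bio at a backing device and resubmit
 * it; submit_bio_noacct() skips the accounting done by submit_bio(). */
static void example_redirect_bio(struct bio *bio, struct block_device *backing)
{
	bio_set_dev(bio, backing);
	submit_bio_noacct(bio);
}
```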
| .. | .. |
| 1684 | 1672 | |
| 1685 | 1673 | void drbd_rs_controller_reset(struct drbd_device *device) |
| 1686 | 1674 | { |
| 1687 | | - struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; |
| | 1675 | + struct gendisk *disk = device->ldev->backing_bdev->bd_disk; |
| 1688 | 1676 | struct fifo_buffer *plan; |
| 1689 | 1677 | |
| 1690 | 1678 | atomic_set(&device->rs_sect_in, 0); |
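The ->bd_contains hop is dropped here: a struct block_device, whether it represents a partition or the whole disk, already has ->bd_disk pointing at the gendisk of the whole device, so the indirection was redundant. A one-line illustration (example_whole_disk is a placeholder):

```c
#include <linux/blkdev.h>

/* Illustrative: bdev->bd_disk is the whole-device gendisk even when bdev
 * refers to a partition, so no ->bd_contains dereference is needed. */
static struct gendisk *example_whole_disk(struct block_device *bdev)
{
	return bdev->bd_disk;
}
```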
| .. | .. |
| 2110 | 2098 | if (uncork) { |
| 2111 | 2099 | mutex_lock(&connection->data.mutex); |
| 2112 | 2100 | if (connection->data.socket) |
| 2113 | | - drbd_tcp_uncork(connection->data.socket); |
| | 2101 | + tcp_sock_set_cork(connection->data.socket->sk, false); |
| 2114 | 2102 | mutex_unlock(&connection->data.mutex); |
| 2115 | 2103 | } |
| 2116 | 2104 | |
| .. | .. |
| 2165 | 2153 | mutex_lock(&connection->data.mutex); |
| 2166 | 2154 | if (connection->data.socket) { |
| 2167 | 2155 | if (cork) |
| 2168 | | - drbd_tcp_cork(connection->data.socket); |
| | 2156 | + tcp_sock_set_cork(connection->data.socket->sk, true); |
| 2169 | 2157 | else if (!uncork) |
| 2170 | | - drbd_tcp_uncork(connection->data.socket); |
| | 2158 | + tcp_sock_set_cork(connection->data.socket->sk, false); |
| 2171 | 2159 | } |
| 2172 | 2160 | mutex_unlock(&connection->data.mutex); |
| 2173 | 2161 | } |
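These last two hunks replace drbd's private drbd_tcp_cork()/drbd_tcp_uncork() wrappers with the kernel-wide tcp_sock_set_cork() helper, which sets or clears TCP_CORK directly on the struct sock. A minimal sketch of the cork-write-uncork pattern, assuming a hypothetical send_all() callback that queues the actual payload:

```c
#include <linux/net.h>
#include <linux/tcp.h>

/* Illustrative: cork the socket so several small writes coalesce into fewer
 * TCP segments, then uncork to flush whatever is still queued. */
static void example_send_corked(struct socket *sock,
				void (*send_all)(struct socket *sock))
{
	tcp_sock_set_cork(sock->sk, true);
	send_all(sock);
	tcp_sock_set_cork(sock->sk, false);
}
```

Corking only batches output; it does not change what is sent, so dropping drbd's wrappers in favour of the common helper keeps the behaviour while removing duplicated socket-option code.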