```diff
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2016 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
  */
 #include <linux/cpumask.h>
 #include <linux/spinlock.h>
```
```diff
@@ -44,7 +41,12 @@
 /* bpf_lru_node helpers */
 static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
 {
-        return node->ref;
+        return READ_ONCE(node->ref);
+}
+
+static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
+{
+        WRITE_ONCE(node->ref, 0);
 }
 
 static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
```
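For context, the two helpers above route every read and clear of `node->ref` through `READ_ONCE()`/`WRITE_ONCE()`, so a flag that is set on one CPU and tested or cleared on another is always touched with marked, untorn accesses rather than plain loads and stores. Below is a minimal user-space sketch of that pattern; the `my_read_once`/`my_write_once` macros and the two-thread scenario are illustrative stand-ins (compile with `gcc -pthread`), not code from this file or from `<linux/compiler.h>`.

```c
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(): force one untorn
 * access through a volatile-qualified pointer (illustrative only).
 */
#define my_read_once(x)		(*(const volatile typeof(x) *)&(x))
#define my_write_once(x, v)	(*(volatile typeof(x) *)&(x) = (v))

struct lru_node {
	int ref;			/* "recently referenced" flag */
};

static struct lru_node node;

/* Lookup side: mark the node as recently used (hypothetical scenario). */
static void *lookup_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		my_write_once(node.ref, 1);
	return NULL;
}

/* Shrink/rotate side: test and consume the flag (hypothetical scenario). */
static void *rotate_thread(void *arg)
{
	long kept = 0;

	(void)arg;
	for (int i = 0; i < 100000; i++)
		if (my_read_once(node.ref)) {
			kept++;
			my_write_once(node.ref, 0);
		}
	return (void *)kept;
}

int main(void)
{
	pthread_t a, b;
	void *kept;

	pthread_create(&a, NULL, lookup_thread, NULL);
	pthread_create(&b, NULL, rotate_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, &kept);
	printf("rotations that saw the ref bit: %ld\n", (long)kept);
	return 0;
}
```

The only point of the sketch is that every shared access to the flag is a marked access, which is exactly what the two helpers enforce; no ordering guarantees beyond that are implied.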
```diff
@@ -92,7 +94,7 @@
 
         bpf_lru_list_count_inc(l, tgt_type);
         node->type = tgt_type;
-        node->ref = 0;
+        bpf_lru_node_clear_ref(node);
         list_move(&node->list, &l->lists[tgt_type]);
 }
 
```
```diff
@@ -113,7 +115,7 @@
                 bpf_lru_list_count_inc(l, tgt_type);
                 node->type = tgt_type;
         }
-        node->ref = 0;
+        bpf_lru_node_clear_ref(node);
 
         /* If the moving node is the next_inactive_rotation candidate,
          * move the next_inactive_rotation pointer also.
```
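Every call site converted in this diff clears the flag at a list-state transition, and the flag matters because a later rotation pass tests it through `bpf_lru_node_is_ref()` to decide whether a node has earned another stay on its list. The snippet below is a simplified, self-contained sketch of that consume step, not the file's actual rotation code; `rotate_one()` and its return strings are invented for illustration.

```c
#include <stdio.h>

/* Illustrative node and marked-access macros; in the kernel these are
 * struct bpf_lru_node and READ_ONCE()/WRITE_ONCE().
 */
struct node {
	int ref;
};

#define my_read_once(x)		(*(const volatile typeof(x) *)&(x))
#define my_write_once(x, v)	(*(volatile typeof(x) *)&(x) = (v))

/* One rotation decision: a referenced node keeps its place and the bit is
 * consumed; an unreferenced node becomes a candidate for demotion.
 */
static const char *rotate_one(struct node *n)
{
	if (my_read_once(n->ref)) {
		my_write_once(n->ref, 0);	/* consume the reference bit */
		return "keep on active list";
	}
	return "demote to inactive list";
}

int main(void)
{
	struct node hot = { .ref = 1 };
	struct node cold = { .ref = 0 };

	printf("hot:  %s\n", rotate_one(&hot));
	printf("cold: %s\n", rotate_one(&cold));
	return 0;
}
```

Setting the bit stays cheap and lock-free, while clearing it happens wherever a node changes lists, which is exactly the set of call sites the diff routes through the new helper.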
```diff
@@ -356,7 +358,7 @@
         *(u32 *)((void *)node + lru->hash_offset) = hash;
         node->cpu = cpu;
         node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
-        node->ref = 0;
+        bpf_lru_node_clear_ref(node);
         list_add(&node->list, local_pending_list(loc_l));
 }
 
```
```diff
@@ -422,7 +424,7 @@
         if (!list_empty(free_list)) {
                 node = list_first_entry(free_list, struct bpf_lru_node, list);
                 *(u32 *)((void *)node + lru->hash_offset) = hash;
-                node->ref = 0;
+                bpf_lru_node_clear_ref(node);
                 __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
         }
 
```
```diff
@@ -525,7 +527,7 @@
         }
 
         node->type = BPF_LRU_LOCAL_LIST_T_FREE;
-        node->ref = 0;
+        bpf_lru_node_clear_ref(node);
         list_move(&node->list, local_free_list(loc_l));
 
         raw_spin_unlock_irqrestore(&loc_l->lock, flags);
```
```diff
@@ -571,7 +573,7 @@
 
                 node = (struct bpf_lru_node *)(buf + node_offset);
                 node->type = BPF_LRU_LIST_T_FREE;
-                node->ref = 0;
+                bpf_lru_node_clear_ref(node);
                 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                 buf += elem_size;
         }
```
```diff
@@ -597,7 +599,7 @@
                 node = (struct bpf_lru_node *)(buf + node_offset);
                 node->cpu = cpu;
                 node->type = BPF_LRU_LIST_T_FREE;
-                node->ref = 0;
+                bpf_lru_node_clear_ref(node);
                 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                 i++;
                 buf += elem_size;
```
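The last two hunks touch the populate loops, which walk one flat allocation and treat every `elem_size`-byte slot as a map element with the LRU node embedded `node_offset` bytes into it. Here is a stand-alone sketch of that layout arithmetic; `struct elem`, `populate()`, and the sizes are invented for illustration and are not the kernel's types.

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative element: an LRU node embedded inside a larger map element,
 * located at runtime via a byte offset (layout here is made up).
 */
struct lru_node {
	int type;
	int ref;
};

struct elem {
	long key;
	struct lru_node lru;	/* found via node_offset at runtime */
	long value;
};

/* Walk a flat buffer of nr_elems slots, elem_size bytes each, and
 * initialize the embedded node in every slot.
 */
static void populate(void *buf, size_t node_offset, size_t elem_size,
		     unsigned int nr_elems)
{
	unsigned char *p = buf;

	for (unsigned int i = 0; i < nr_elems; i++) {
		struct lru_node *node = (struct lru_node *)(p + node_offset);

		node->type = 0;		/* "free" in the real code */
		node->ref = 0;
		p += elem_size;		/* next element in the flat buffer */
	}
}

int main(void)
{
	enum { N = 4 };
	struct elem *elems = calloc(N, sizeof(*elems));

	if (!elems)
		return 1;

	/* The kernel is handed these values; here they come from the layout. */
	populate(elems, offsetof(struct elem, lru), sizeof(*elems), N);

	printf("elem[2].lru.ref = %d\n", elems[2].lru.ref);
	free(elems);
	return 0;
}
```

Embedding the node at a caller-supplied offset is what lets these loops initialize elements they did not allocate themselves; in the diff above they now also clear the flag through `bpf_lru_node_clear_ref()`, like every other site that used to write `node->ref = 0` directly.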