| .. | .. |
|---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
|---|
| 1 | 2 | /* Copyright (c) 2016 Facebook |
|---|
| 2 | | - * |
|---|
| 3 | | - * This program is free software; you can redistribute it and/or |
|---|
| 4 | | - * modify it under the terms of version 2 of the GNU General Public |
|---|
| 5 | | - * License as published by the Free Software Foundation. |
|---|
| 6 | 3 | */ |
|---|
| 7 | 4 | #ifndef __BPF_LRU_LIST_H_ |
|---|
| 8 | 5 | #define __BPF_LRU_LIST_H_ |
|---|
| 9 | 6 | |
|---|
| 7 | +#include <linux/cache.h> |
|---|
| 10 | 8 | #include <linux/list.h> |
|---|
| 11 | 9 | #include <linux/spinlock_types.h> |
|---|
| 12 | 10 | |
|---|
| .. | .. |
|---|
| 33 | 31 | struct bpf_lru_list { |
|---|
| 34 | 32 | struct list_head lists[NR_BPF_LRU_LIST_T]; |
|---|
| 35 | 33 | unsigned int counts[NR_BPF_LRU_LIST_COUNT]; |
|---|
| 36 | | - /* The next inacitve list rotation starts from here */ |
|---|
| 34 | + /* The next inactive list rotation starts from here */ |
|---|
| 37 | 35 | struct list_head *next_inactive_rotation; |
|---|
| 38 | 36 | |
|---|
| 39 | 37 | raw_spinlock_t lock ____cacheline_aligned_in_smp; |
|---|
| .. | .. |
|---|
| 66 | 64 | |
|---|
/* Mark @node as recently referenced.
 *
 * ref is only an approximation of access frequency and does not have to
 * be perfectly accurate, so the update is done without any locking (this
 * was stated in the comment the conversion below removed).  READ_ONCE /
 * WRITE_ONCE make the intentionally-racy load and store explicit and
 * keep the compiler from tearing or duplicating them; they add no
 * ordering or mutual exclusion.
 */
static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
{
	/* Only write when the flag is not already set, to avoid dirtying
	 * the cache line on every lookup of an already-referenced node.
	 */
	if (!READ_ONCE(node->ref))
		WRITE_ONCE(node->ref, 1);
}
|---|
| 75 | 70 | |
|---|
| 76 | 71 | int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, |
|---|