 .. |  .. |
 47 |  47 |
 48 |  48 |  	smt_size = SMT_SIZE;
 49 |  49 |
 50 |     | -	s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry),
 51 |     | -		     GFP_KERNEL);
    |  50 | +	s = kvzalloc(struct_size(s, smtab, smt_size), GFP_KERNEL);
 52 |  51 |  	if (!s)
 53 |  52 |  		return NULL;
 54 |  53 |  	s->smt_size = smt_size;
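The hunk above swaps the open-coded allocation size for struct_size() (from include/linux/overflow.h), which computes sizeof(*s) plus smt_size trailing smtab[] elements with overflow checking. A minimal userspace sketch of the arithmetic, using stand-in types rather than the driver's real struct smt_data / struct smt_entry, and an illustrative table size:

#include <stdlib.h>

/* Stand-ins for the driver's types (the real ones live in cxgb4's smt.h);
 * only the flexible-array layout matters for the size calculation. */
struct smt_entry {
	int idx;
};

struct smt_data {
	unsigned int smt_size;
	struct smt_entry smtab[];	/* flexible array member */
};

int main(void)
{
	unsigned int smt_size = 256;	/* illustrative table size */
	struct smt_data *s;

	/*
	 * struct_size(s, smtab, smt_size) in the kernel evaluates to
	 * sizeof(*s) + smt_size * sizeof(s->smtab[0]), with overflow
	 * checking; the expression below is the unchecked open-coded
	 * form that the patch removes.
	 */
	s = calloc(1, sizeof(*s) + smt_size * sizeof(s->smtab[0]));
	if (!s)
		return 1;
	s->smt_size = smt_size;
	free(s);
	return 0;
}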
 .. |  .. |
 56 |  55 |  	for (i = 0; i < s->smt_size; ++i) {
 57 |  56 |  		s->smtab[i].idx = i;
 58 |  57 |  		s->smtab[i].state = SMT_STATE_UNUSED;
 59 |     | -		memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
    |  58 | +		eth_zero_addr(s->smtab[i].src_mac);
 60 |  59 |  		spin_lock_init(&s->smtab[i].lock);
 61 |     | -		atomic_set(&s->smtab[i].refcnt, 0);
    |  60 | +		s->smtab[i].refcnt = 0;
 62 |  61 |  	}
 63 |  62 |  	return s;
 64 |  63 | }
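In this hunk, eth_zero_addr() is the <linux/etherdevice.h> helper for zeroing a 6-byte MAC, so the memset() becomes self-describing, and the refcnt initialisation drops atomic_set() because the field is now a plain int. A small userspace sketch with a local stand-in for the kernel helper (not the kernel function itself):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* length of an Ethernet MAC address */

/* Local stand-in for the kernel's eth_zero_addr(): same effect as the
 * memset() the patch replaces, just named after what it does. */
static void zero_mac(unsigned char *addr)
{
	memset(addr, 0, ETH_ALEN);
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	int i;

	zero_mac(mac);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", mac[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}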
 .. |  .. |
 69 |  68 |  	struct smt_entry *e, *end;
 70 |  69 |
 71 |  70 |  	for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
 72 |     | -		if (atomic_read(&e->refcnt) == 0) {
    |  71 | +		if (e->refcnt == 0) {
 73 |  72 |  			if (!first_free)
 74 |  73 |  				first_free = e;
 75 |  74 |  		} else {
 .. |  .. |
 98 |  97 |
 99 |  98 |  static void t4_smte_free(struct smt_entry *e)
100 |  99 |  {
101 |     | -	if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
    | 100 | +	if (e->refcnt == 0) { /* hasn't been recycled */
102 | 101 |  		e->state = SMT_STATE_UNUSED;
103 | 102 |  	}
104 | 103 |  }
105 | 104 |
106 | 105 |  /**
    | 106 | + * cxgb4_smt_release - Release SMT entry
107 | 107 |   * @e: smt entry to release
108 | 108 |   *
109 | 109 |   * Releases ref count and frees up an smt entry from SMT table
 .. |  .. |
111 | 111 |  void cxgb4_smt_release(struct smt_entry *e)
112 | 112 |  {
113 | 113 |  	spin_lock_bh(&e->lock);
114 |     | -	if (atomic_dec_and_test(&e->refcnt))
    | 114 | +	if ((--e->refcnt) == 0)
115 | 115 |  		t4_smte_free(e);
116 | 116 |  	spin_unlock_bh(&e->lock);
117 | 117 |  }
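The release path above and the allocation path below only touch refcnt while holding e->lock (spin_lock_bh() / spin_lock()), which is what makes dropping atomic_t safe. A minimal userspace sketch of that pattern, assuming a pthread mutex as a stand-in for the entry spinlock and simplified get/put helpers that are not the driver's functions:

#include <pthread.h>
#include <stdio.h>

/* Cut-down stand-in for struct smt_entry: the refcount is a plain int that
 * is only ever read or written while holding the entry lock, mirroring the
 * pattern the patch moves to. */
struct entry {
	pthread_mutex_t lock;
	int refcnt;
	int in_use;
};

static void entry_get(struct entry *e)
{
	pthread_mutex_lock(&e->lock);
	if (!e->refcnt)			/* first user claims the entry */
		e->in_use = 1;
	++e->refcnt;
	pthread_mutex_unlock(&e->lock);
}

static void entry_put(struct entry *e)
{
	pthread_mutex_lock(&e->lock);
	if (--e->refcnt == 0)		/* last user releases the entry */
		e->in_use = 0;
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	struct entry e = { .lock = PTHREAD_MUTEX_INITIALIZER };

	entry_get(&e);
	entry_get(&e);
	entry_put(&e);
	entry_put(&e);
	printf("refcnt=%d in_use=%d\n", e.refcnt, e.in_use);
	return 0;
}

Because the claim-if-unused test and the final decrement share one lock, no atomic read-modify-write is needed and the two paths cannot race with each other.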
 .. |  .. |
216 | 216 |  	e = find_or_alloc_smte(s, smac);
217 | 217 |  	if (e) {
218 | 218 |  		spin_lock(&e->lock);
219 |     | -		if (!atomic_read(&e->refcnt)) {
220 |     | -			atomic_set(&e->refcnt, 1);
    | 219 | +		if (!e->refcnt) {
    | 220 | +			e->refcnt = 1;
221 | 221 |  			e->state = SMT_STATE_SWITCHING;
222 | 222 |  			e->pfvf = pfvf;
223 | 223 |  			memcpy(e->src_mac, smac, ETH_ALEN);
224 | 224 |  			write_smt_entry(adap, e);
225 | 225 |  		} else {
226 |     | -			atomic_inc(&e->refcnt);
    | 226 | +			++e->refcnt;
227 | 227 |  		}
228 | 228 |  		spin_unlock(&e->lock);
229 | 229 |  	}
 .. |  .. |
232 | 232 |  }
233 | 233 |
234 | 234 |  /**
    | 235 | + * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
235 | 236 |   * @dev: net_device pointer
236 | 237 |   * @smac: MAC address to add to SMT
237 | 238 |   * Returns pointer to the SMT entry created