...
 #include "cxgb4.h"
 #include "sched.h"
 
-/* Spinlock must be held by caller */
 static int t4_sched_class_fw_cmd(struct port_info *pi,
                                  struct ch_sched_params *p,
                                  enum sched_fw_ops op)
...
         e = &s->tab[p->u.params.class];
         switch (op) {
         case SCHED_FW_OP_ADD:
+        case SCHED_FW_OP_DEL:
                 err = t4_sched_params(adap, p->type,
                                       p->u.params.level, p->u.params.mode,
                                       p->u.params.rateunit,
                                       p->u.params.ratemode,
                                       p->u.params.channel, e->idx,
                                       p->u.params.minrate, p->u.params.maxrate,
-                                      p->u.params.weight, p->u.params.pktsize);
+                                      p->u.params.weight, p->u.params.pktsize,
+                                      p->u.params.burstsize);
                 break;
         default:
                 err = -ENOTSUPP;
...
         return err;
 }
 
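With this hunk, deleting a class goes through the same firmware scheduling command as adding one: SCHED_FW_OP_DEL falls through to the single t4_sched_params() call, and the caller supplies the reset values (see cxgb4_sched_class_free() further down). A minimal userspace model of that shared path, with invented stand-in types and a printf in place of the mailbox command:

#include <stdio.h>

enum sched_fw_ops { SCHED_FW_OP_ADD, SCHED_FW_OP_DEL };

/* stand-in for t4_sched_params(); prints instead of issuing a mailbox cmd */
static int fw_sched_params(int class_idx, unsigned int maxrate_kbps,
                           unsigned int burstsize)
{
        printf("class %d: maxrate=%u kbps, burstsize=%u\n",
               class_idx, maxrate_kbps, burstsize);
        return 0;
}

static int sched_class_fw_cmd(int class_idx, unsigned int maxrate_kbps,
                              unsigned int burstsize, enum sched_fw_ops op)
{
        switch (op) {
        case SCHED_FW_OP_ADD:
        case SCHED_FW_OP_DEL:   /* same command; params carry the reset values */
                return fw_sched_params(class_idx, maxrate_kbps, burstsize);
        default:
                return -1;
        }
}

int main(void)
{
        sched_class_fw_cmd(0, 100000, 8192, SCHED_FW_OP_ADD);
        sched_class_fw_cmd(0, 0, 0, SCHED_FW_OP_DEL);   /* reset-style delete */
        return 0;
}
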
---|
-/* Spinlock must be held by caller */
 static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
                                    enum sched_bind_type type, bool bind)
 {
...
 
                 pf = adap->pf;
                 vf = 0;
+
+                err = t4_set_params(adap, adap->mbox, pf, vf, 1,
+                                    &fw_param, &fw_class);
+                break;
+        }
+        case SCHED_FLOWC: {
+                struct sched_flowc_entry *fe;
+
+                fe = (struct sched_flowc_entry *)arg;
+
+                fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
+                err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
+                                               fe->param.tid, fw_class);
                 break;
         }
         default:
                 err = -ENOTSUPP;
-                goto out;
+                break;
         }
 
-        err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
-
-out:
         return err;
 }
 
-static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
-                                                 const unsigned int qid,
-                                                 int *index)
+static void *t4_sched_entry_lookup(struct port_info *pi,
+                                   enum sched_bind_type type,
+                                   const u32 val)
 {
         struct sched_table *s = pi->sched_tbl;
         struct sched_class *e, *end;
-        struct sched_class *found = NULL;
-        int i;
+        void *found = NULL;
 
-        /* Look for a class with matching bound queue parameters */
+        /* Look for an entry with matching @val */
         end = &s->tab[s->sched_size];
         for (e = &s->tab[0]; e != end; ++e) {
-                struct sched_queue_entry *qe;
-
-                i = 0;
-                if (e->state == SCHED_STATE_UNUSED)
+                if (e->state == SCHED_STATE_UNUSED ||
+                    e->bind_type != type)
                         continue;
 
-                list_for_each_entry(qe, &e->queue_list, list) {
-                        if (qe->cntxt_id == qid) {
-                                found = e;
-                                if (index)
-                                        *index = i;
-                                break;
+                switch (type) {
+                case SCHED_QUEUE: {
+                        struct sched_queue_entry *qe;
+
+                        list_for_each_entry(qe, &e->entry_list, list) {
+                                if (qe->cntxt_id == val) {
+                                        found = qe;
+                                        break;
+                                }
                         }
-                        i++;
+                        break;
+                }
+                case SCHED_FLOWC: {
+                        struct sched_flowc_entry *fe;
+
+                        list_for_each_entry(fe, &e->entry_list, list) {
+                                if (fe->param.tid == val) {
+                                        found = fe;
+                                        break;
+                                }
+                        }
+                        break;
+                }
+                default:
+                        return NULL;
                 }
 
                 if (found)
...
         return found;
 }
 
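The old lookup walked only bound queues and returned the class together with a positional index; t4_sched_entry_lookup() instead filters classes by their bind_type and hands back the matching entry itself, so callers recover the class from the entry's recorded class id (as cxgb4_sched_queue_lookup() below does). A rough, self-contained model of that type-tagged scan, with fixed arrays standing in for the kernel's linked entry_list and all names invented:

#include <stddef.h>
#include <stdio.h>

enum bind_type { BIND_QUEUE, BIND_FLOWC };

struct queue_entry { unsigned int cntxt_id; unsigned int class_id; };
struct flowc_entry { unsigned int tid; unsigned int class_id; };

struct class_model {
        int used;
        enum bind_type bind_type;
        struct queue_entry queues[4];   /* fixed arrays stand in for entry_list */
        struct flowc_entry flowcs[4];
        size_t nqueues, nflowcs;
};

/* scan every in-use class of the requested type and return the matching
 * entry itself; the caller recovers the class via entry->class_id */
static void *entry_lookup(struct class_model *tab, size_t n,
                          enum bind_type type, unsigned int val)
{
        for (size_t i = 0; i < n; i++) {
                struct class_model *e = &tab[i];

                if (!e->used || e->bind_type != type)
                        continue;
                if (type == BIND_QUEUE) {
                        for (size_t j = 0; j < e->nqueues; j++)
                                if (e->queues[j].cntxt_id == val)
                                        return &e->queues[j];
                } else {
                        for (size_t j = 0; j < e->nflowcs; j++)
                                if (e->flowcs[j].tid == val)
                                        return &e->flowcs[j];
                }
        }
        return NULL;
}

int main(void)
{
        struct class_model tab[2] = {
                { .used = 1, .bind_type = BIND_QUEUE, .nqueues = 1,
                  .queues = { { .cntxt_id = 7, .class_id = 0 } } },
        };
        struct queue_entry *qe = entry_lookup(tab, 2, BIND_QUEUE, 7);

        printf("found: %s\n", qe ? "yes" : "no");
        return 0;
}
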
---|
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+                                             struct ch_sched_queue *p)
+{
+        struct port_info *pi = netdev2pinfo(dev);
+        struct sched_queue_entry *qe = NULL;
+        struct adapter *adap = pi->adapter;
+        struct sge_eth_txq *txq;
+
+        if (p->queue < 0 || p->queue >= pi->nqsets)
+                return NULL;
+
+        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+        qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+        return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
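cxgb4_sched_queue_lookup() is the exported wrapper around this lookup: it validates the queue index, maps it to the hardware Tx queue context id, and returns the class the queue is bound to, if any. A hypothetical caller fragment (the surrounding check and the function name are invented for illustration, not taken from this patch):

/* hypothetical: refuse to rebind a queue whose existing class sits at a
 * different scheduling level than class-level rate limiting */
static int example_validate_queue(struct net_device *dev,
                                  struct ch_sched_queue *qe)
{
        struct sched_class *e;

        e = cxgb4_sched_queue_lookup(dev, qe);
        if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL)
                return -EBUSY;
        return 0;
}
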
---|
 static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
 {
-        struct adapter *adap = pi->adapter;
-        struct sched_class *e;
         struct sched_queue_entry *qe = NULL;
+        struct adapter *adap = pi->adapter;
         struct sge_eth_txq *txq;
-        unsigned int qid;
-        int index = -1;
+        struct sched_class *e;
         int err = 0;
 
         if (p->queue < 0 || p->queue >= pi->nqsets)
                 return -ERANGE;
 
         txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
-        qid = txq->q.cntxt_id;
 
-        /* Find the existing class that the queue is bound to */
-        e = t4_sched_queue_lookup(pi, qid, &index);
-        if (e && index >= 0) {
-                int i = 0;
-
-                spin_lock(&e->lock);
-                list_for_each_entry(qe, &e->queue_list, list) {
-                        if (i == index)
-                                break;
-                        i++;
-                }
+        /* Find the existing entry that the queue is bound to */
+        qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+        if (qe) {
                 err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
                                               false);
-                if (err) {
-                        spin_unlock(&e->lock);
-                        goto out;
-                }
+                if (err)
+                        return err;
 
+                e = &pi->sched_tbl->tab[qe->param.class];
                 list_del(&qe->list);
                 kvfree(qe);
-                if (atomic_dec_and_test(&e->refcnt)) {
-                        e->state = SCHED_STATE_UNUSED;
-                        memset(&e->info, 0, sizeof(e->info));
-                }
-                spin_unlock(&e->lock);
+                if (atomic_dec_and_test(&e->refcnt))
+                        cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
         }
-out:
         return err;
 }
 
 static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
 {
-        struct adapter *adap = pi->adapter;
         struct sched_table *s = pi->sched_tbl;
-        struct sched_class *e;
         struct sched_queue_entry *qe = NULL;
+        struct adapter *adap = pi->adapter;
         struct sge_eth_txq *txq;
+        struct sched_class *e;
         unsigned int qid;
         int err = 0;
 
...
 
         /* Unbind queue from any existing class */
         err = t4_sched_queue_unbind(pi, p);
-        if (err) {
-                kvfree(qe);
-                goto out;
-        }
+        if (err)
+                goto out_err;
 
         /* Bind queue to specified class */
-        memset(qe, 0, sizeof(*qe));
         qe->cntxt_id = qid;
         memcpy(&qe->param, p, sizeof(qe->param));
 
         e = &s->tab[qe->param.class];
-        spin_lock(&e->lock);
         err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
-        if (err) {
-                kvfree(qe);
-                spin_unlock(&e->lock);
-                goto out;
-        }
+        if (err)
+                goto out_err;
 
-        list_add_tail(&qe->list, &e->queue_list);
+        list_add_tail(&qe->list, &e->entry_list);
+        e->bind_type = SCHED_QUEUE;
         atomic_inc(&e->refcnt);
-        spin_unlock(&e->lock);
-out:
+        return err;
+
+out_err:
+        kvfree(qe);
+        return err;
+}
+
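The rewritten bind path also drops the per-class spinlock and funnels both failure points through a single out_err label that frees the preallocated entry; on success, ownership of the entry passes to the class's entry_list. A small standalone sketch of that allocate/unbind/bind/cleanup shape (stubbed helpers, invented names):

#include <errno.h>
#include <stdlib.h>

struct entry { int id; };

static int do_unbind(int id) { (void)id; return 0; }           /* stub */
static int do_bind(struct entry *e) { (void)e; return 0; }     /* stub */

/* allocate first, then detach any old binding, then attach; every
 * failure after the allocation funnels through one cleanup label */
static int bind_entry(int id)
{
        struct entry *e;
        int err;

        e = calloc(1, sizeof(*e));
        if (!e)
                return -ENOMEM;

        err = do_unbind(id);    /* unbind from any existing class */
        if (err)
                goto out_err;

        e->id = id;
        err = do_bind(e);       /* bind to the requested class */
        if (err)
                goto out_err;

        return 0;               /* success: e now belongs to the binding */

out_err:
        free(e);
        return err;
}

int main(void)
{
        return bind_entry(3) ? 1 : 0;
}
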
---|
+static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+        struct sched_flowc_entry *fe = NULL;
+        struct adapter *adap = pi->adapter;
+        struct sched_class *e;
+        int err = 0;
+
+        if (p->tid < 0 || p->tid >= adap->tids.neotids)
+                return -ERANGE;
+
+        /* Find the existing entry that the flowc is bound to */
+        fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
+        if (fe) {
+                err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
+                                              false);
+                if (err)
+                        return err;
+
+                e = &pi->sched_tbl->tab[fe->param.class];
+                list_del(&fe->list);
+                kvfree(fe);
+                if (atomic_dec_and_test(&e->refcnt))
+                        cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
+        }
+        return err;
+}
+
+static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+        struct sched_table *s = pi->sched_tbl;
+        struct sched_flowc_entry *fe = NULL;
+        struct adapter *adap = pi->adapter;
+        struct sched_class *e;
+        int err = 0;
+
+        if (p->tid < 0 || p->tid >= adap->tids.neotids)
+                return -ERANGE;
+
+        fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
+        if (!fe)
+                return -ENOMEM;
+
+        /* Unbind flowc from any existing class */
+        err = t4_sched_flowc_unbind(pi, p);
+        if (err)
+                goto out_err;
+
+        /* Bind flowc to specified class */
+        memcpy(&fe->param, p, sizeof(fe->param));
+
+        e = &s->tab[fe->param.class];
+        err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
+        if (err)
+                goto out_err;
+
+        list_add_tail(&fe->list, &e->entry_list);
+        e->bind_type = SCHED_FLOWC;
+        atomic_inc(&e->refcnt);
+        return err;
+
+out_err:
+        kvfree(fe);
         return err;
 }
 
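Queue and FLOWC entries now share one lifetime rule for their class: every successful bind takes a reference (atomic_inc()), every unbind drops one, and the final atomic_dec_and_test() triggers cxgb4_sched_class_free(), which also resets the hardware class. A compact C11 stdatomic model of that contract (names invented):

#include <stdatomic.h>
#include <stdio.h>

struct class_model {
        atomic_int refcnt;
        int active;
};

static void class_free(struct class_model *c)
{
        /* in the driver this also issues SCHED_FW_OP_DEL to reset hw */
        c->active = 0;
        printf("class reset and returned to the pool\n");
}

static void bind_ref(struct class_model *c)
{
        atomic_fetch_add(&c->refcnt, 1);
}

static void unbind_unref(struct class_model *c)
{
        /* mirrors atomic_dec_and_test(): free only on the last reference */
        if (atomic_fetch_sub(&c->refcnt, 1) == 1)
                class_free(c);
}

int main(void)
{
        struct class_model c = { .refcnt = 0, .active = 1 };

        bind_ref(&c);           /* first entry bound */
        bind_ref(&c);           /* second entry bound */
        unbind_unref(&c);       /* still referenced: kept */
        unbind_unref(&c);       /* last unbind: freed */
        return 0;
}
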
---|
...
         case SCHED_QUEUE: {
                 struct sched_queue_entry *qe;
 
-                list_for_each_entry(qe, &e->queue_list, list)
+                list_for_each_entry(qe, &e->entry_list, list)
                         t4_sched_queue_unbind(pi, &qe->param);
+                break;
+        }
+        case SCHED_FLOWC: {
+                struct sched_flowc_entry *fe;
+
+                list_for_each_entry(fe, &e->entry_list, list)
+                        t4_sched_flowc_unbind(pi, &fe->param);
                 break;
         }
         default:
...
                 err = t4_sched_queue_unbind(pi, qe);
                 break;
         }
+        case SCHED_FLOWC: {
+                struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+                if (bind)
+                        err = t4_sched_flowc_bind(pi, fe);
+                else
+                        err = t4_sched_flowc_unbind(pi, fe);
+                break;
+        }
         default:
                 err = -ENOTSUPP;
                 break;
...
---|
                            enum sched_bind_type type)
 {
         struct port_info *pi = netdev2pinfo(dev);
-        struct sched_table *s;
-        int err = 0;
         u8 class_id;
 
         if (!can_sched(dev))
...
                 class_id = qe->class;
                 break;
         }
+        case SCHED_FLOWC: {
+                struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+                class_id = fe->class;
+                break;
+        }
         default:
                 return -ENOTSUPP;
         }
...
         if (class_id == SCHED_CLS_NONE)
                 return -ENOTSUPP;
 
-        s = pi->sched_tbl;
-        write_lock(&s->rw_lock);
-        err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
-        write_unlock(&s->rw_lock);
+        return t4_sched_class_bind_unbind_op(pi, arg, type, true);
 
-        return err;
 }
 
---|
 /**
...
                              enum sched_bind_type type)
 {
         struct port_info *pi = netdev2pinfo(dev);
-        struct sched_table *s;
-        int err = 0;
         u8 class_id;
 
         if (!can_sched(dev))
...
                 class_id = qe->class;
                 break;
         }
+        case SCHED_FLOWC: {
+                struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+                class_id = fe->class;
+                break;
+        }
         default:
                 return -ENOTSUPP;
         }
...
         if (!valid_class_id(dev, class_id))
                 return -EINVAL;
 
-        s = pi->sched_tbl;
-        write_lock(&s->rw_lock);
-        err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
-        write_unlock(&s->rw_lock);
-
-        return err;
+        return t4_sched_class_bind_unbind_op(pi, arg, type, false);
 }
 
---|
 /* If @p is NULL, fetch any available unused class */
...
                                                   const struct ch_sched_params *p)
 {
         struct sched_table *s = pi->sched_tbl;
-        struct sched_class *e, *end;
         struct sched_class *found = NULL;
+        struct sched_class *e, *end;
 
         if (!p) {
                 /* Get any available unused class */
...
 static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
                                                 struct ch_sched_params *p)
 {
-        struct sched_table *s = pi->sched_tbl;
-        struct sched_class *e;
+        struct sched_class *e = NULL;
         u8 class_id;
         int err;
 
...
---|
         if (class_id != SCHED_CLS_NONE)
                 return NULL;
 
-        write_lock(&s->rw_lock);
-        /* See if there's an exisiting class with same
-         * requested sched params
+        /* See if there's an existing class with same requested sched
+         * params. Classes can only be shared among FLOWC types. For
+         * other types, always request a new class.
          */
-        e = t4_sched_class_lookup(pi, p);
+        if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
+                e = t4_sched_class_lookup(pi, p);
+
         if (!e) {
                 struct ch_sched_params np;
 
                 /* Fetch any available unused class */
                 e = t4_sched_class_lookup(pi, NULL);
                 if (!e)
-                        goto out;
+                        return NULL;
 
                 memcpy(&np, p, sizeof(np));
                 np.u.params.class = e->idx;
-
-                spin_lock(&e->lock);
                 /* New class */
                 err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
-                if (err) {
-                        spin_unlock(&e->lock);
-                        e = NULL;
-                        goto out;
-                }
+                if (err)
+                        return NULL;
                 memcpy(&e->info, &np, sizeof(e->info));
                 atomic_set(&e->refcnt, 0);
                 e->state = SCHED_STATE_ACTIVE;
-                spin_unlock(&e->lock);
         }
 
-out:
-        write_unlock(&s->rw_lock);
         return e;
 }
 
---|
...
         return t4_sched_class_alloc(pi, p);
 }
 
-static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+/**
+ * cxgb4_sched_class_free - free a scheduling class
+ * @dev: net_device pointer
+ * @classid: scheduling class id to free
+ *
+ * Frees a scheduling class if there are no users.
+ */
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
 {
-        t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+        struct port_info *pi = netdev2pinfo(dev);
+        struct sched_table *s = pi->sched_tbl;
+        struct ch_sched_params p;
+        struct sched_class *e;
+        u32 speed;
+        int ret;
+
+        e = &s->tab[classid];
+        if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
+                /* Port based rate limiting needs explicit reset back
+                 * to max rate. But, we'll do explicit reset for all
+                 * types, instead of just port based type, to be on
+                 * the safer side.
+                 */
+                memcpy(&p, &e->info, sizeof(p));
+                /* Always reset mode to 0. Otherwise, FLOWC mode will
+                 * still be enabled even after resetting the traffic
+                 * class.
+                 */
+                p.u.params.mode = 0;
+                p.u.params.minrate = 0;
+                p.u.params.pktsize = 0;
+
+                ret = t4_get_link_params(pi, NULL, &speed, NULL);
+                if (!ret)
+                        p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
+                else
+                        p.u.params.maxrate = SCHED_MAX_RATE_KBPS;
+
+                t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);
+
+                e->state = SCHED_STATE_UNUSED;
+                memset(&e->info, 0, sizeof(e->info));
+        }
+}
+
+static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+{
+        struct port_info *pi = netdev2pinfo(dev);
+
+        t4_sched_class_unbind_all(pi, e, e->bind_type);
+        cxgb4_sched_class_free(dev, e->idx);
 }
 
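Freeing a class is no longer just bookkeeping: the firmware class is explicitly reprogrammed to defaults, with mode, minrate, and pktsize zeroed and maxrate restored to the link speed (t4_get_link_params() reports Mbps, hence the * 1000 to get Kbps) or to the SCHED_MAX_RATE_KBPS cap when the link query fails. The maxrate fallback in isolation, as a runnable sketch with a stubbed link query and an assumed cap value:

#include <stdio.h>

#define SCHED_MAX_RATE_KBPS_MODEL 100000000U    /* assumed stand-in for the driver cap */

/* stub for t4_get_link_params(); returns 0 and the link speed in Mbps */
static int get_link_speed(unsigned int *speed_mbps)
{
        *speed_mbps = 25000;    /* pretend a 25G link */
        return 0;
}

static unsigned int reset_maxrate_kbps(void)
{
        unsigned int speed;

        if (!get_link_speed(&speed))
                return speed * 1000;    /* Mbps -> Kbps */
        return SCHED_MAX_RATE_KBPS_MODEL;
}

int main(void)
{
        printf("reset maxrate: %u kbps\n", reset_maxrate_kbps());
        return 0;
}
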
---|
 struct sched_table *t4_init_sched(unsigned int sched_size)
...
         struct sched_table *s;
         unsigned int i;
 
-        s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL);
+        s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
         if (!s)
                 return NULL;
 
         s->sched_size = sched_size;
-        rwlock_init(&s->rw_lock);
 
         for (i = 0; i < s->sched_size; i++) {
                 memset(&s->tab[i], 0, sizeof(struct sched_class));
                 s->tab[i].idx = i;
                 s->tab[i].state = SCHED_STATE_UNUSED;
-                INIT_LIST_HEAD(&s->tab[i].queue_list);
-                spin_lock_init(&s->tab[i].lock);
+                INIT_LIST_HEAD(&s->tab[i].entry_list);
                 atomic_set(&s->tab[i].refcnt, 0);
         }
         return s;
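The allocation switch from open-coded sizeof arithmetic to struct_size() matters because sched_table ends in a flexible array of sched_class: struct_size() computes the header-plus-array size and saturates on overflow instead of wrapping. A userspace equivalent with the overflow check written out (struct fields trimmed down):

#include <stdint.h>
#include <stdlib.h>

struct class_slot { unsigned int idx; int state; };

struct table {
        unsigned int sched_size;
        struct class_slot tab[];        /* flexible array member */
};

/* stand-in for kvzalloc(struct_size(s, tab, n), GFP_KERNEL): one zeroed
 * allocation covering the header and n trailing elements */
static struct table *table_alloc(unsigned int n)
{
        size_t max = (SIZE_MAX - sizeof(struct table)) /
                     sizeof(struct class_slot);

        if (n > max)
                return NULL;    /* struct_size() saturates to SIZE_MAX here */
        return calloc(1, sizeof(struct table) + n * sizeof(struct class_slot));
}

int main(void)
{
        struct table *s = table_alloc(16);

        if (!s)
                return 1;
        s->sched_size = 16;
        free(s);
        return 0;
}
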
---|
...
         for (i = 0; i < s->sched_size; i++) {
                 struct sched_class *e;
 
-                write_lock(&s->rw_lock);
                 e = &s->tab[i];
                 if (e->state == SCHED_STATE_ACTIVE)
-                        t4_sched_class_free(pi, e);
-                write_unlock(&s->rw_lock);
+                        t4_sched_class_free(adap->port[j], e);
         }
         kvfree(s);
 }
---|