forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -38,7 +38,6 @@
 #include "cxgb4.h"
 #include "sched.h"
 
-/* Spinlock must be held by caller */
 static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
@@ -51,13 +50,15 @@
 	e = &s->tab[p->u.params.class];
 	switch (op) {
 	case SCHED_FW_OP_ADD:
+	case SCHED_FW_OP_DEL:
 		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
-				      p->u.params.weight, p->u.params.pktsize);
+				      p->u.params.weight, p->u.params.pktsize,
+				      p->u.params.burstsize);
 		break;
 	default:
 		err = -ENOTSUPP;
@@ -67,7 +68,6 @@
 	return err;
 }
 
-/* Spinlock must be held by caller */
 static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
 {
@@ -94,45 +94,69 @@
 
 		pf = adap->pf;
 		vf = 0;
+
+		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
+				    &fw_param, &fw_class);
+		break;
+	}
+	case SCHED_FLOWC: {
+		struct sched_flowc_entry *fe;
+
+		fe = (struct sched_flowc_entry *)arg;
+
+		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
+		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
 		break;
 	}
 	default:
 		err = -ENOTSUPP;
-		goto out;
+		break;
 	}
 
-	err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);
-
-out:
 	return err;
 }
 
-static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
-						 const unsigned int qid,
-						 int *index)
+static void *t4_sched_entry_lookup(struct port_info *pi,
+				   enum sched_bind_type type,
+				   const u32 val)
 {
 	struct sched_table *s = pi->sched_tbl;
 	struct sched_class *e, *end;
-	struct sched_class *found = NULL;
-	int i;
+	void *found = NULL;
 
-	/* Look for a class with matching bound queue parameters */
+	/* Look for an entry with matching @val */
 	end = &s->tab[s->sched_size];
 	for (e = &s->tab[0]; e != end; ++e) {
-		struct sched_queue_entry *qe;
-
-		i = 0;
-		if (e->state == SCHED_STATE_UNUSED)
+		if (e->state == SCHED_STATE_UNUSED ||
+		    e->bind_type != type)
 			continue;
 
-		list_for_each_entry(qe, &e->queue_list, list) {
-			if (qe->cntxt_id == qid) {
-				found = e;
-				if (index)
-					*index = i;
-				break;
+		switch (type) {
+		case SCHED_QUEUE: {
+			struct sched_queue_entry *qe;
+
+			list_for_each_entry(qe, &e->entry_list, list) {
+				if (qe->cntxt_id == val) {
+					found = qe;
+					break;
+				}
 			}
-			i++;
+			break;
+		}
+		case SCHED_FLOWC: {
+			struct sched_flowc_entry *fe;
+
+			list_for_each_entry(fe, &e->entry_list, list) {
+				if (fe->param.tid == val) {
+					found = fe;
+					break;
+				}
+			}
+			break;
+		}
+		default:
+			return NULL;
 		}
 
 		if (found)
@@ -142,59 +166,59 @@
 	return found;
 }
 
+struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+					     struct ch_sched_queue *p)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+	struct sched_queue_entry *qe = NULL;
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_txq *txq;
+
+	if (p->queue < 0 || p->queue >= pi->nqsets)
+		return NULL;
+
+	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
+	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+	return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
+}
+
 static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
 {
-	struct adapter *adap = pi->adapter;
-	struct sched_class *e;
 	struct sched_queue_entry *qe = NULL;
+	struct adapter *adap = pi->adapter;
 	struct sge_eth_txq *txq;
-	unsigned int qid;
-	int index = -1;
+	struct sched_class *e;
 	int err = 0;
 
 	if (p->queue < 0 || p->queue >= pi->nqsets)
 		return -ERANGE;
 
 	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
-	qid = txq->q.cntxt_id;
 
-	/* Find the existing class that the queue is bound to */
-	e = t4_sched_queue_lookup(pi, qid, &index);
-	if (e && index >= 0) {
-		int i = 0;
-
-		spin_lock(&e->lock);
-		list_for_each_entry(qe, &e->queue_list, list) {
-			if (i == index)
-				break;
-			i++;
-		}
+	/* Find the existing entry that the queue is bound to */
+	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
+	if (qe) {
 		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
-		if (err) {
-			spin_unlock(&e->lock);
-			goto out;
-		}
+		if (err)
+			return err;
 
+		e = &pi->sched_tbl->tab[qe->param.class];
 		list_del(&qe->list);
 		kvfree(qe);
-		if (atomic_dec_and_test(&e->refcnt)) {
-			e->state = SCHED_STATE_UNUSED;
-			memset(&e->info, 0, sizeof(e->info));
-		}
-		spin_unlock(&e->lock);
+		if (atomic_dec_and_test(&e->refcnt))
+			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
 	}
-out:
 	return err;
 }
 
 static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
 {
-	struct adapter *adap = pi->adapter;
 	struct sched_table *s = pi->sched_tbl;
-	struct sched_class *e;
 	struct sched_queue_entry *qe = NULL;
+	struct adapter *adap = pi->adapter;
 	struct sge_eth_txq *txq;
+	struct sched_class *e;
 	unsigned int qid;
 	int err = 0;
 
@@ -210,29 +234,90 @@
 
 	/* Unbind queue from any existing class */
 	err = t4_sched_queue_unbind(pi, p);
-	if (err) {
-		kvfree(qe);
-		goto out;
-	}
+	if (err)
+		goto out_err;
 
 	/* Bind queue to specified class */
-	memset(qe, 0, sizeof(*qe));
 	qe->cntxt_id = qid;
 	memcpy(&qe->param, p, sizeof(qe->param));
 
 	e = &s->tab[qe->param.class];
-	spin_lock(&e->lock);
 	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
-	if (err) {
-		kvfree(qe);
-		spin_unlock(&e->lock);
-		goto out;
-	}
+	if (err)
+		goto out_err;
 
-	list_add_tail(&qe->list, &e->queue_list);
+	list_add_tail(&qe->list, &e->entry_list);
+	e->bind_type = SCHED_QUEUE;
 	atomic_inc(&e->refcnt);
-	spin_unlock(&e->lock);
-out:
+	return err;
+
+out_err:
+	kvfree(qe);
+	return err;
+}
+
+static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+	struct sched_flowc_entry *fe = NULL;
+	struct adapter *adap = pi->adapter;
+	struct sched_class *e;
+	int err = 0;
+
+	if (p->tid < 0 || p->tid >= adap->tids.neotids)
+		return -ERANGE;
+
+	/* Find the existing entry that the flowc is bound to */
+	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
+	if (fe) {
+		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
+		if (err)
+			return err;
+
+		e = &pi->sched_tbl->tab[fe->param.class];
+		list_del(&fe->list);
+		kvfree(fe);
+		if (atomic_dec_and_test(&e->refcnt))
+			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
+	}
+	return err;
+}
+
+static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
+{
+	struct sched_table *s = pi->sched_tbl;
+	struct sched_flowc_entry *fe = NULL;
+	struct adapter *adap = pi->adapter;
+	struct sched_class *e;
+	int err = 0;
+
+	if (p->tid < 0 || p->tid >= adap->tids.neotids)
+		return -ERANGE;
+
+	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
+	if (!fe)
+		return -ENOMEM;
+
+	/* Unbind flowc from any existing class */
+	err = t4_sched_flowc_unbind(pi, p);
+	if (err)
+		goto out_err;
+
+	/* Bind flowc to specified class */
+	memcpy(&fe->param, p, sizeof(fe->param));
+
+	e = &s->tab[fe->param.class];
+	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
+	if (err)
+		goto out_err;
+
+	list_add_tail(&fe->list, &e->entry_list);
+	e->bind_type = SCHED_FLOWC;
+	atomic_inc(&e->refcnt);
+	return err;
+
+out_err:
+	kvfree(fe);
 	return err;
 }
 
@@ -247,8 +332,15 @@
 	case SCHED_QUEUE: {
 		struct sched_queue_entry *qe;
 
-		list_for_each_entry(qe, &e->queue_list, list)
+		list_for_each_entry(qe, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
+		break;
+	}
+	case SCHED_FLOWC: {
+		struct sched_flowc_entry *fe;
+
+		list_for_each_entry(fe, &e->entry_list, list)
+			t4_sched_flowc_unbind(pi, &fe->param);
 		break;
 	}
 	default:
@@ -274,6 +366,15 @@
 			err = t4_sched_queue_unbind(pi, qe);
 		break;
 	}
+	case SCHED_FLOWC: {
+		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+		if (bind)
+			err = t4_sched_flowc_bind(pi, fe);
+		else
+			err = t4_sched_flowc_unbind(pi, fe);
+		break;
+	}
 	default:
 		err = -ENOTSUPP;
 		break;
@@ -296,8 +397,6 @@
 			   enum sched_bind_type type)
 {
 	struct port_info *pi = netdev2pinfo(dev);
-	struct sched_table *s;
-	int err = 0;
 	u8 class_id;
 
 	if (!can_sched(dev))
@@ -313,6 +412,12 @@
 		class_id = qe->class;
 		break;
 	}
+	case SCHED_FLOWC: {
+		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+		class_id = fe->class;
+		break;
+	}
 	default:
 		return -ENOTSUPP;
 	}
@@ -323,12 +428,8 @@
 	if (class_id == SCHED_CLS_NONE)
 		return -ENOTSUPP;
 
-	s = pi->sched_tbl;
-	write_lock(&s->rw_lock);
-	err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
-	write_unlock(&s->rw_lock);
+	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
 
-	return err;
 }
 
 /**
@@ -343,8 +444,6 @@
 			     enum sched_bind_type type)
 {
 	struct port_info *pi = netdev2pinfo(dev);
-	struct sched_table *s;
-	int err = 0;
 	u8 class_id;
 
 	if (!can_sched(dev))
@@ -360,6 +459,12 @@
 		class_id = qe->class;
 		break;
 	}
+	case SCHED_FLOWC: {
+		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;
+
+		class_id = fe->class;
+		break;
+	}
 	default:
 		return -ENOTSUPP;
 	}
@@ -367,12 +472,7 @@
 	if (!valid_class_id(dev, class_id))
 		return -EINVAL;
 
-	s = pi->sched_tbl;
-	write_lock(&s->rw_lock);
-	err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
-	write_unlock(&s->rw_lock);
-
-	return err;
+	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
 }
 
 /* If @p is NULL, fetch any available unused class */
@@ -380,8 +480,8 @@
 						  const struct ch_sched_params *p)
 {
 	struct sched_table *s = pi->sched_tbl;
-	struct sched_class *e, *end;
 	struct sched_class *found = NULL;
+	struct sched_class *e, *end;
 
 	if (!p) {
 		/* Get any available unused class */
@@ -425,8 +525,7 @@
 static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						 struct ch_sched_params *p)
 {
-	struct sched_table *s = pi->sched_tbl;
-	struct sched_class *e;
+	struct sched_class *e = NULL;
 	u8 class_id;
 	int err;
 
@@ -441,38 +540,32 @@
 	if (class_id != SCHED_CLS_NONE)
 		return NULL;
 
-	write_lock(&s->rw_lock);
-	/* See if there's an exisiting class with same
-	 * requested sched params
+	/* See if there's an exisiting class with same requested sched
+	 * params. Classes can only be shared among FLOWC types. For
+	 * other types, always request a new class.
 	 */
-	e = t4_sched_class_lookup(pi, p);
+	if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
+		e = t4_sched_class_lookup(pi, p);
+
 	if (!e) {
 		struct ch_sched_params np;
 
 		/* Fetch any available unused class */
 		e = t4_sched_class_lookup(pi, NULL);
 		if (!e)
-			goto out;
+			return NULL;
 
 		memcpy(&np, p, sizeof(np));
 		np.u.params.class = e->idx;
-
-		spin_lock(&e->lock);
 		/* New class */
 		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
-		if (err) {
-			spin_unlock(&e->lock);
-			e = NULL;
-			goto out;
-		}
+		if (err)
+			return NULL;
 		memcpy(&e->info, &np, sizeof(e->info));
 		atomic_set(&e->refcnt, 0);
 		e->state = SCHED_STATE_ACTIVE;
-		spin_unlock(&e->lock);
 	}
 
-out:
-	write_unlock(&s->rw_lock);
 	return e;
 }
 
@@ -502,9 +595,57 @@
 	return t4_sched_class_alloc(pi, p);
 }
 
-static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
+/**
+ * cxgb4_sched_class_free - free a scheduling class
+ * @dev: net_device pointer
+ * @classid: scheduling class id to free
+ *
+ * Frees a scheduling class if there are no users.
+ */
+void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
 {
-	t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
+	struct port_info *pi = netdev2pinfo(dev);
+	struct sched_table *s = pi->sched_tbl;
+	struct ch_sched_params p;
+	struct sched_class *e;
+	u32 speed;
+	int ret;
+
+	e = &s->tab[classid];
+	if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
+		/* Port based rate limiting needs explicit reset back
+		 * to max rate. But, we'll do explicit reset for all
+		 * types, instead of just port based type, to be on
+		 * the safer side.
+		 */
+		memcpy(&p, &e->info, sizeof(p));
+		/* Always reset mode to 0. Otherwise, FLOWC mode will
+		 * still be enabled even after resetting the traffic
+		 * class.
+		 */
+		p.u.params.mode = 0;
+		p.u.params.minrate = 0;
+		p.u.params.pktsize = 0;
+
+		ret = t4_get_link_params(pi, NULL, &speed, NULL);
+		if (!ret)
+			p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
+		else
+			p.u.params.maxrate = SCHED_MAX_RATE_KBPS;
+
+		t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);
+
+		e->state = SCHED_STATE_UNUSED;
+		memset(&e->info, 0, sizeof(e->info));
+	}
+}
+
+static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+{
+	struct port_info *pi = netdev2pinfo(dev);
+
+	t4_sched_class_unbind_all(pi, e, e->bind_type);
+	cxgb4_sched_class_free(dev, e->idx);
 }
 
 struct sched_table *t4_init_sched(unsigned int sched_size)
512653 struct sched_table *s;
513654 unsigned int i;
514655
515
- s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL);
656
+ s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
516657 if (!s)
517658 return NULL;
518659
519660 s->sched_size = sched_size;
520
- rwlock_init(&s->rw_lock);
521661
522662 for (i = 0; i < s->sched_size; i++) {
523663 memset(&s->tab[i], 0, sizeof(struct sched_class));
524664 s->tab[i].idx = i;
525665 s->tab[i].state = SCHED_STATE_UNUSED;
526
- INIT_LIST_HEAD(&s->tab[i].queue_list);
527
- spin_lock_init(&s->tab[i].lock);
666
+ INIT_LIST_HEAD(&s->tab[i].entry_list);
528667 atomic_set(&s->tab[i].refcnt, 0);
529668 }
530669 return s;
....@@ -545,11 +684,9 @@
545684 for (i = 0; i < s->sched_size; i++) {
546685 struct sched_class *e;
547686
548
- write_lock(&s->rw_lock);
549687 e = &s->tab[i];
550688 if (e->state == SCHED_STATE_ACTIVE)
551
- t4_sched_class_free(pi, e);
552
- write_unlock(&s->rw_lock);
689
+ t4_sched_class_free(adap->port[j], e);
553690 }
554691 kvfree(s);
555692 }