1 // SPDX-License-Identifier: GPL-2.0
2
3 /* net/sched/sch_taprio.c Time Aware Priority Scheduler
4 *
5 * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com>
6 *
7 */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/list.h>
14 #include <linux/errno.h>
15 #include <linux/skbuff.h>
16 #include <linux/math64.h>
17 #include <linux/module.h>
18 #include <linux/spinlock.h>
19 #include <linux/rcupdate.h>
20 #include <net/netlink.h>
21 #include <net/pkt_sched.h>
22 #include <net/pkt_cls.h>
23 #include <net/sch_generic.h>
24 #include <net/sock.h>
25 #include <net/tcp.h>
26
27 static LIST_HEAD(taprio_list);
28 static DEFINE_SPINLOCK(taprio_list_lock);
29
30 #define TAPRIO_ALL_GATES_OPEN -1
31
32 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
33 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
34 #define TAPRIO_FLAGS_INVALID U32_MAX
35
36 struct sched_entry {
37 struct list_head list;
38
39 /* The instant that this entry "closes" and the next one
40 * should open; the qdisc will make some effort to ensure that no
41 * packet leaves after this time.
42 */
43 ktime_t close_time;
44 ktime_t next_txtime;
45 atomic_t budget;
46 int index;
47 u32 gate_mask;
48 u32 interval;
49 u8 command;
50 };
51
52 struct sched_gate_list {
53 struct rcu_head rcu;
54 struct list_head entries;
55 size_t num_entries;
56 ktime_t cycle_close_time;
57 s64 cycle_time;
58 s64 cycle_time_extension;
59 s64 base_time;
60 };
61
62 struct taprio_sched {
63 struct Qdisc **qdiscs;
64 struct Qdisc *root;
65 u32 flags;
66 enum tk_offsets tk_offset;
67 int clockid;
68 atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
69 * speeds it's sub-nanoseconds per byte
70 */
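/* For example, at 10Gb/s one byte takes 8 bits / 10^10 b/s = 0.8ns,
 * i.e. 800 picoseconds; at 1Gb/s one byte takes 8000 picoseconds.
 */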
71
72 /* Protects the update side of the RCU protected current_entry */
73 spinlock_t current_entry_lock;
74 struct sched_entry __rcu *current_entry;
75 struct sched_gate_list __rcu *oper_sched;
76 struct sched_gate_list __rcu *admin_sched;
77 struct hrtimer advance_timer;
78 struct list_head taprio_list;
79 struct sk_buff *(*dequeue)(struct Qdisc *sch);
80 struct sk_buff *(*peek)(struct Qdisc *sch);
81 u32 txtime_delay;
82 };
83
84 struct __tc_taprio_qopt_offload {
85 refcount_t users;
86 struct tc_taprio_qopt_offload offload;
87 };
88
89 static ktime_t sched_base_time(const struct sched_gate_list *sched)
90 {
91 if (!sched)
92 return KTIME_MAX;
93
94 return ns_to_ktime(sched->base_time);
95 }
96
97 static ktime_t taprio_get_time(struct taprio_sched *q)
98 {
99 ktime_t mono = ktime_get();
100
101 switch (q->tk_offset) {
102 case TK_OFFS_MAX:
103 return mono;
104 default:
105 return ktime_mono_to_any(mono, q->tk_offset);
106 }
107
108 return KTIME_MAX;
109 }
110
111 static void taprio_free_sched_cb(struct rcu_head *head)
112 {
113 struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
114 struct sched_entry *entry, *n;
115
116 if (!sched)
117 return;
118
119 list_for_each_entry_safe(entry, n, &sched->entries, list) {
120 list_del(&entry->list);
121 kfree(entry);
122 }
123
124 kfree(sched);
125 }
126
127 static void switch_schedules(struct taprio_sched *q,
128 struct sched_gate_list **admin,
129 struct sched_gate_list **oper)
130 {
131 rcu_assign_pointer(q->oper_sched, *admin);
132 rcu_assign_pointer(q->admin_sched, NULL);
133
134 if (*oper)
135 call_rcu(&(*oper)->rcu, taprio_free_sched_cb);
136
137 *oper = *admin;
138 *admin = NULL;
139 }
140
141 /* Get how much time has already elapsed in the current cycle. */
142 static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
143 {
144 ktime_t time_since_sched_start;
145 s32 time_elapsed;
146
147 time_since_sched_start = ktime_sub(time, sched->base_time);
148 div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
149
150 return time_elapsed;
151 }
152
153 static ktime_t get_interval_end_time(struct sched_gate_list *sched,
154 struct sched_gate_list *admin,
155 struct sched_entry *entry,
156 ktime_t intv_start)
157 {
158 s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
159 ktime_t intv_end, cycle_ext_end, cycle_end;
160
161 cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
162 intv_end = ktime_add_ns(intv_start, entry->interval);
163 cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
164
165 if (ktime_before(intv_end, cycle_end))
166 return intv_end;
167 else if (admin && admin != sched &&
168 ktime_after(admin->base_time, cycle_end) &&
169 ktime_before(admin->base_time, cycle_ext_end))
170 return admin->base_time;
171 else
172 return cycle_end;
173 }
174
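/* Transmission duration in nanoseconds of a 'len' byte frame:
 * len * picos_per_byte / 1000. As an illustration, a 1500 byte frame
 * at 1Gb/s (8000 picoseconds per byte) takes 1500 * 8000 / 1000 =
 * 12000ns.
 */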
175 static int length_to_duration(struct taprio_sched *q, int len)
176 {
177 return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
178 }
179
180 /* Returns the entry corresponding to the next available interval. If
181 * validate_interval is set, it only validates whether the timestamp occurs
182 * when the gate corresponding to the skb's traffic class is open.
183 */
184 static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
185 struct Qdisc *sch,
186 struct sched_gate_list *sched,
187 struct sched_gate_list *admin,
188 ktime_t time,
189 ktime_t *interval_start,
190 ktime_t *interval_end,
191 bool validate_interval)
192 {
193 ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
194 ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
195 struct sched_entry *entry = NULL, *entry_found = NULL;
196 struct taprio_sched *q = qdisc_priv(sch);
197 struct net_device *dev = qdisc_dev(sch);
198 bool entry_available = false;
199 s32 cycle_elapsed;
200 int tc, n;
201
202 tc = netdev_get_prio_tc_map(dev, skb->priority);
203 packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));
204
205 *interval_start = 0;
206 *interval_end = 0;
207
208 if (!sched)
209 return NULL;
210
211 cycle = sched->cycle_time;
212 cycle_elapsed = get_cycle_time_elapsed(sched, time);
213 curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
214 cycle_end = ktime_add_ns(curr_intv_end, cycle);
215
216 list_for_each_entry(entry, &sched->entries, list) {
217 curr_intv_start = curr_intv_end;
218 curr_intv_end = get_interval_end_time(sched, admin, entry,
219 curr_intv_start);
220
221 if (ktime_after(curr_intv_start, cycle_end))
222 break;
223
224 if (!(entry->gate_mask & BIT(tc)) ||
225 packet_transmit_time > entry->interval)
226 continue;
227
228 txtime = entry->next_txtime;
229
230 if (ktime_before(txtime, time) || validate_interval) {
231 transmit_end_time = ktime_add_ns(time, packet_transmit_time);
232 if ((ktime_before(curr_intv_start, time) &&
233 ktime_before(transmit_end_time, curr_intv_end)) ||
234 (ktime_after(curr_intv_start, time) && !validate_interval)) {
235 entry_found = entry;
236 *interval_start = curr_intv_start;
237 *interval_end = curr_intv_end;
238 break;
239 } else if (!entry_available && !validate_interval) {
240 /* Here, we are just trying to find out the
241 * first available interval in the next cycle.
242 */
243 entry_available = 1;
244 entry_found = entry;
245 *interval_start = ktime_add_ns(curr_intv_start, cycle);
246 *interval_end = ktime_add_ns(curr_intv_end, cycle);
247 }
248 } else if (ktime_before(txtime, earliest_txtime) &&
249 !entry_available) {
250 earliest_txtime = txtime;
251 entry_found = entry;
252 n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
253 *interval_start = ktime_add(curr_intv_start, n * cycle);
254 *interval_end = ktime_add(curr_intv_end, n * cycle);
255 }
256 }
257
258 return entry_found;
259 }
260
261 static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
262 {
263 struct taprio_sched *q = qdisc_priv(sch);
264 struct sched_gate_list *sched, *admin;
265 ktime_t interval_start, interval_end;
266 struct sched_entry *entry;
267
268 rcu_read_lock();
269 sched = rcu_dereference(q->oper_sched);
270 admin = rcu_dereference(q->admin_sched);
271
272 entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
273 &interval_start, &interval_end, true);
274 rcu_read_unlock();
275
276 return entry;
277 }
278
279 static bool taprio_flags_valid(u32 flags)
280 {
281 /* Make sure no other flag bits are set. */
282 if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
283 TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
284 return false;
285 /* txtime-assist and full offload are mutually exclusive */
286 if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
287 (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
288 return false;
289 return true;
290 }
291
292 /* This returns the tstamp value set by TCP in terms of the set clock. */
293 static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
294 {
295 unsigned int offset = skb_network_offset(skb);
296 const struct ipv6hdr *ipv6h;
297 const struct iphdr *iph;
298 struct ipv6hdr _ipv6h;
299
300 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
301 if (!ipv6h)
302 return 0;
303
304 if (ipv6h->version == 4) {
305 iph = (struct iphdr *)ipv6h;
306 offset += iph->ihl * 4;
307
308 /* special-case 6in4 tunnelling, as that is a common way to get
309 * v6 connectivity in the home
310 */
311 if (iph->protocol == IPPROTO_IPV6) {
312 ipv6h = skb_header_pointer(skb, offset,
313 sizeof(_ipv6h), &_ipv6h);
314
315 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
316 return 0;
317 } else if (iph->protocol != IPPROTO_TCP) {
318 return 0;
319 }
320 } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
321 return 0;
322 }
323
324 return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
325 }
326
327 /* There are a few scenarios where we will have to modify the txtime from
328 * what is read from next_txtime in sched_entry. They are:
329 * 1. If txtime is in the past,
330 * a. The gate for the traffic class is currently open and packet can be
331 * transmitted before it closes, schedule the packet right away.
332 * b. If the gate corresponding to the traffic class is going to open later
333 * in the cycle, set the txtime of packet to the interval start.
334 * 2. If txtime is in the future, there are packets corresponding to the
335 * current traffic class waiting to be transmitted. So, the following
336 * possibilities exist:
337 * a. We can transmit the packet before the window containing the txtime
338 * closes.
339 * b. The window might close before the transmission can be completed
340 * successfully. So, schedule the packet in the next open window.
341 */
342 static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
343 {
344 ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
345 struct taprio_sched *q = qdisc_priv(sch);
346 struct sched_gate_list *sched, *admin;
347 ktime_t minimum_time, now, txtime;
348 int len, packet_transmit_time;
349 struct sched_entry *entry;
350 bool sched_changed;
351
352 now = taprio_get_time(q);
353 minimum_time = ktime_add_ns(now, q->txtime_delay);
354
355 tcp_tstamp = get_tcp_tstamp(q, skb);
356 minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);
357
358 rcu_read_lock();
359 admin = rcu_dereference(q->admin_sched);
360 sched = rcu_dereference(q->oper_sched);
361 if (admin && ktime_after(minimum_time, admin->base_time))
362 switch_schedules(q, &admin, &sched);
363
364 /* Until the schedule starts, all the queues are open */
365 if (!sched || ktime_before(minimum_time, sched->base_time)) {
366 txtime = minimum_time;
367 goto done;
368 }
369
370 len = qdisc_pkt_len(skb);
371 packet_transmit_time = length_to_duration(q, len);
372
373 do {
374 sched_changed = 0;
375
376 entry = find_entry_to_transmit(skb, sch, sched, admin,
377 minimum_time,
378 &interval_start, &interval_end,
379 false);
380 if (!entry) {
381 txtime = 0;
382 goto done;
383 }
384
385 txtime = entry->next_txtime;
386 txtime = max_t(ktime_t, txtime, minimum_time);
387 txtime = max_t(ktime_t, txtime, interval_start);
388
389 if (admin && admin != sched &&
390 ktime_after(txtime, admin->base_time)) {
391 sched = admin;
392 sched_changed = 1;
393 continue;
394 }
395
396 transmit_end_time = ktime_add(txtime, packet_transmit_time);
397 minimum_time = transmit_end_time;
398
399 /* Update the txtime of the current entry to the next time its
400 * interval starts.
401 */
402 if (ktime_after(transmit_end_time, interval_end))
403 entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
404 } while (sched_changed || ktime_after(transmit_end_time, interval_end));
405
406 entry->next_txtime = transmit_end_time;
407
408 done:
409 rcu_read_unlock();
410 return txtime;
411 }
412
413 static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
414 struct sk_buff **to_free)
415 {
416 struct taprio_sched *q = qdisc_priv(sch);
417 struct Qdisc *child;
418 int queue;
419
420 queue = skb_get_queue_mapping(skb);
421
422 child = q->qdiscs[queue];
423 if (unlikely(!child))
424 return qdisc_drop(skb, sch, to_free);
425
426 if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
427 if (!is_valid_interval(skb, sch))
428 return qdisc_drop(skb, sch, to_free);
429 } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
430 skb->tstamp = get_packet_txtime(skb, sch);
431 if (!skb->tstamp)
432 return qdisc_drop(skb, sch, to_free);
433 }
434
435 qdisc_qstats_backlog_inc(sch, skb);
436 sch->q.qlen++;
437
438 return qdisc_enqueue(skb, child, to_free);
439 }
440
441 static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
442 {
443 struct taprio_sched *q = qdisc_priv(sch);
444 struct net_device *dev = qdisc_dev(sch);
445 struct sched_entry *entry;
446 struct sk_buff *skb;
447 u32 gate_mask;
448 int i;
449
450 rcu_read_lock();
451 entry = rcu_dereference(q->current_entry);
452 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
453 rcu_read_unlock();
454
455 if (!gate_mask)
456 return NULL;
457
458 for (i = 0; i < dev->num_tx_queues; i++) {
459 struct Qdisc *child = q->qdiscs[i];
460 int prio;
461 u8 tc;
462
463 if (unlikely(!child))
464 continue;
465
466 skb = child->ops->peek(child);
467 if (!skb)
468 continue;
469
470 if (TXTIME_ASSIST_IS_ENABLED(q->flags))
471 return skb;
472
473 prio = skb->priority;
474 tc = netdev_get_prio_tc_map(dev, prio);
475
476 if (!(gate_mask & BIT(tc)))
477 continue;
478
479 return skb;
480 }
481
482 return NULL;
483 }
484
485 static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
486 {
487 struct taprio_sched *q = qdisc_priv(sch);
488 struct net_device *dev = qdisc_dev(sch);
489 struct sk_buff *skb;
490 int i;
491
492 for (i = 0; i < dev->num_tx_queues; i++) {
493 struct Qdisc *child = q->qdiscs[i];
494
495 if (unlikely(!child))
496 continue;
497
498 skb = child->ops->peek(child);
499 if (!skb)
500 continue;
501
502 return skb;
503 }
504
505 return NULL;
506 }
507
508 static struct sk_buff *taprio_peek(struct Qdisc *sch)
509 {
510 struct taprio_sched *q = qdisc_priv(sch);
511
512 return q->peek(sch);
513 }
514
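/* The budget of an entry is the number of bytes that can be sent
 * during its interval: interval_ns * 1000 / picos_per_byte. As an
 * illustration, a 100us interval at 1Gb/s (8000 picoseconds per byte)
 * allows 100000 * 1000 / 8000 = 12500 bytes.
 */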
515 static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
516 {
517 atomic_set(&entry->budget,
518 div64_u64((u64)entry->interval * 1000,
519 atomic64_read(&q->picos_per_byte)));
520 }
521
522 static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
523 {
524 struct taprio_sched *q = qdisc_priv(sch);
525 struct net_device *dev = qdisc_dev(sch);
526 struct sk_buff *skb = NULL;
527 struct sched_entry *entry;
528 u32 gate_mask;
529 int i;
530
531 rcu_read_lock();
532 entry = rcu_dereference(q->current_entry);
533 /* if there's no entry, it means that the schedule didn't
534 * start yet, so force all gates to be open, this is in
535 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
536 * "AdminGateStates"
537 */
538 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
539
540 if (!gate_mask)
541 goto done;
542
543 for (i = 0; i < dev->num_tx_queues; i++) {
544 struct Qdisc *child = q->qdiscs[i];
545 ktime_t guard;
546 int prio;
547 int len;
548 u8 tc;
549
550 if (unlikely(!child))
551 continue;
552
553 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
554 skb = child->ops->dequeue(child);
555 if (!skb)
556 continue;
557 goto skb_found;
558 }
559
560 skb = child->ops->peek(child);
561 if (!skb)
562 continue;
563
564 prio = skb->priority;
565 tc = netdev_get_prio_tc_map(dev, prio);
566
567 if (!(gate_mask & BIT(tc))) {
568 skb = NULL;
569 continue;
570 }
571
572 len = qdisc_pkt_len(skb);
573 guard = ktime_add_ns(taprio_get_time(q),
574 length_to_duration(q, len));
575
576 /* In the case that there's no gate entry, there's no
577 * guard band ...
578 */
579 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
580 ktime_after(guard, entry->close_time)) {
581 skb = NULL;
582 continue;
583 }
584
585 /* ... and no budget. */
586 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
587 atomic_sub_return(len, &entry->budget) < 0) {
588 skb = NULL;
589 continue;
590 }
591
592 skb = child->ops->dequeue(child);
593 if (unlikely(!skb))
594 goto done;
595
596 skb_found:
597 qdisc_bstats_update(sch, skb);
598 qdisc_qstats_backlog_dec(sch, skb);
599 sch->q.qlen--;
600
601 goto done;
602 }
603
604 done:
605 rcu_read_unlock();
606
607 return skb;
608 }
609
610 static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
611 {
612 struct taprio_sched *q = qdisc_priv(sch);
613 struct net_device *dev = qdisc_dev(sch);
614 struct sk_buff *skb;
615 int i;
616
617 for (i = 0; i < dev->num_tx_queues; i++) {
618 struct Qdisc *child = q->qdiscs[i];
619
620 if (unlikely(!child))
621 continue;
622
623 skb = child->ops->dequeue(child);
624 if (unlikely(!skb))
625 continue;
626
627 qdisc_bstats_update(sch, skb);
628 qdisc_qstats_backlog_dec(sch, skb);
629 sch->q.qlen--;
630
631 return skb;
632 }
633
634 return NULL;
635 }
636
637 static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
638 {
639 struct taprio_sched *q = qdisc_priv(sch);
640
641 return q->dequeue(sch);
642 }
643
644 static bool should_restart_cycle(const struct sched_gate_list *oper,
645 const struct sched_entry *entry)
646 {
647 if (list_is_last(&entry->list, &oper->entries))
648 return true;
649
650 if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
651 return true;
652
653 return false;
654 }
655
656 static bool should_change_schedules(const struct sched_gate_list *admin,
657 const struct sched_gate_list *oper,
658 ktime_t close_time)
659 {
660 ktime_t next_base_time, extension_time;
661
662 if (!admin)
663 return false;
664
665 next_base_time = sched_base_time(admin);
666
667 /* This is the simple case: the close_time would fall after
668 * the next schedule's base_time.
669 */
670 if (ktime_compare(next_base_time, close_time) <= 0)
671 return true;
672
673 /* This is the cycle_time_extension case: if the close_time
674 * plus the amount that can be extended would fall after the
675 * next schedule base_time, we can extend the current schedule
676 * for that amount.
677 */
678 extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);
679
680 /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
681 * how precisely the extension should be made. So after
682 * conformance testing, this logic may change.
683 */
684 if (ktime_compare(next_base_time, extension_time) <= 0)
685 return true;
686
687 return false;
688 }
689
690 static enum hrtimer_restart advance_sched(struct hrtimer *timer)
691 {
692 struct taprio_sched *q = container_of(timer, struct taprio_sched,
693 advance_timer);
694 struct sched_gate_list *oper, *admin;
695 struct sched_entry *entry, *next;
696 struct Qdisc *sch = q->root;
697 ktime_t close_time;
698
699 spin_lock(&q->current_entry_lock);
700 entry = rcu_dereference_protected(q->current_entry,
701 lockdep_is_held(&q->current_entry_lock));
702 oper = rcu_dereference_protected(q->oper_sched,
703 lockdep_is_held(&q->current_entry_lock));
704 admin = rcu_dereference_protected(q->admin_sched,
705 lockdep_is_held(&q->current_entry_lock));
706
707 if (!oper)
708 switch_schedules(q, &admin, &oper);
709
710 /* This can happen in two cases: 1. this is the very first run
711 * of this function (i.e. we weren't running any schedule
712 * previously); 2. The previous schedule just ended. The first
713 * entry of all schedules is pre-calculated during the
714 * schedule initialization.
715 */
716 if (unlikely(!entry || entry->close_time == oper->base_time)) {
717 next = list_first_entry(&oper->entries, struct sched_entry,
718 list);
719 close_time = next->close_time;
720 goto first_run;
721 }
722
723 if (should_restart_cycle(oper, entry)) {
724 next = list_first_entry(&oper->entries, struct sched_entry,
725 list);
726 oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
727 oper->cycle_time);
728 } else {
729 next = list_next_entry(entry, list);
730 }
731
732 close_time = ktime_add_ns(entry->close_time, next->interval);
733 close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
734
735 if (should_change_schedules(admin, oper, close_time)) {
736 /* Set things so the next time this runs, the new
737 * schedule runs.
738 */
739 close_time = sched_base_time(admin);
740 switch_schedules(q, &admin, &oper);
741 }
742
743 next->close_time = close_time;
744 taprio_set_budget(q, next);
745
746 first_run:
747 rcu_assign_pointer(q->current_entry, next);
748 spin_unlock(&q->current_entry_lock);
749
750 hrtimer_set_expires(&q->advance_timer, close_time);
751
752 rcu_read_lock();
753 __netif_schedule(sch);
754 rcu_read_unlock();
755
756 return HRTIMER_RESTART;
757 }
758
759 static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
760 [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
761 [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
762 [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
763 [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
764 };
765
766 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
767 [TCA_TAPRIO_ATTR_PRIOMAP] = {
768 .len = sizeof(struct tc_mqprio_qopt)
769 },
770 [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
771 [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
772 [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
773 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
774 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
775 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
776 [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
777 [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
778 };
779
780 static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
781 struct sched_entry *entry,
782 struct netlink_ext_ack *extack)
783 {
784 int min_duration = length_to_duration(q, ETH_ZLEN);
785 u32 interval = 0;
786
787 if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
788 entry->command = nla_get_u8(
789 tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
790
791 if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
792 entry->gate_mask = nla_get_u32(
793 tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
794
795 if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
796 interval = nla_get_u32(
797 tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
798
799 /* The interval should allow at least the minimum ethernet
800 * frame to go out.
801 */
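/* As an illustration, assuming a 1Gb/s link (8000 picoseconds per
 * byte), ETH_ZLEN (60 bytes) needs 60 * 8000 / 1000 = 480ns, so any
 * interval shorter than that is rejected.
 */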
802 if (interval < min_duration) {
803 NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
804 return -EINVAL;
805 }
806
807 entry->interval = interval;
808
809 return 0;
810 }
811
812 static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
813 struct sched_entry *entry, int index,
814 struct netlink_ext_ack *extack)
815 {
816 struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
817 int err;
818
819 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
820 entry_policy, NULL);
821 if (err < 0) {
822 NL_SET_ERR_MSG(extack, "Could not parse nested entry");
823 return -EINVAL;
824 }
825
826 entry->index = index;
827
828 return fill_sched_entry(q, tb, entry, extack);
829 }
830
831 static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
832 struct sched_gate_list *sched,
833 struct netlink_ext_ack *extack)
834 {
835 struct nlattr *n;
836 int err, rem;
837 int i = 0;
838
839 if (!list)
840 return -EINVAL;
841
842 nla_for_each_nested(n, list, rem) {
843 struct sched_entry *entry;
844
845 if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
846 NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
847 continue;
848 }
849
850 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
851 if (!entry) {
852 NL_SET_ERR_MSG(extack, "Not enough memory for entry");
853 return -ENOMEM;
854 }
855
856 err = parse_sched_entry(q, n, entry, i, extack);
857 if (err < 0) {
858 kfree(entry);
859 return err;
860 }
861
862 list_add_tail(&entry->list, &sched->entries);
863 i++;
864 }
865
866 sched->num_entries = i;
867
868 return i;
869 }
870
871 static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
872 struct sched_gate_list *new,
873 struct netlink_ext_ack *extack)
874 {
875 int err = 0;
876
877 if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
878 NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
879 return -ENOTSUPP;
880 }
881
882 if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
883 new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
884
885 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
886 new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
887
888 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
889 new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
890
891 if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
892 err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
893 new, extack);
894 if (err < 0)
895 return err;
896
897 if (!new->cycle_time) {
898 struct sched_entry *entry;
899 ktime_t cycle = 0;
900
901 list_for_each_entry(entry, &new->entries, list)
902 cycle = ktime_add_ns(cycle, entry->interval);
903 new->cycle_time = cycle;
904 }
905
906 return 0;
907 }
908
909 static int taprio_parse_mqprio_opt(struct net_device *dev,
910 struct tc_mqprio_qopt *qopt,
911 struct netlink_ext_ack *extack,
912 u32 taprio_flags)
913 {
914 int i, j;
915
916 if (!qopt && !dev->num_tc) {
917 NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
918 return -EINVAL;
919 }
920
921 /* If num_tc is already set, it means that the user already
922 * configured the mqprio part
923 */
924 if (dev->num_tc)
925 return 0;
926
927 /* Verify num_tc is not out of max range */
928 if (qopt->num_tc > TC_MAX_QUEUE) {
929 NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
930 return -EINVAL;
931 }
932
933 /* taprio imposes that traffic classes map 1:n to tx queues */
934 if (qopt->num_tc > dev->num_tx_queues) {
935 NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
936 return -EINVAL;
937 }
938
939 /* Verify priority mapping uses valid tcs */
940 for (i = 0; i <= TC_BITMASK; i++) {
941 if (qopt->prio_tc_map[i] >= qopt->num_tc) {
942 NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
943 return -EINVAL;
944 }
945 }
946
947 for (i = 0; i < qopt->num_tc; i++) {
948 unsigned int last = qopt->offset[i] + qopt->count[i];
949
950 /* Verify the queue count is within the TX range; 'last' being equal
951 * to real_num_tx_queues indicates the last queue is in use.
952 */
953 if (qopt->offset[i] >= dev->num_tx_queues ||
954 !qopt->count[i] ||
955 last > dev->real_num_tx_queues) {
956 NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
957 return -EINVAL;
958 }
959
960 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
961 continue;
962
963 /* Verify that the offset and counts do not overlap */
964 for (j = i + 1; j < qopt->num_tc; j++) {
965 if (last > qopt->offset[j]) {
966 NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
967 return -EINVAL;
968 }
969 }
970 }
971
972 return 0;
973 }
974
975 static int taprio_get_start_time(struct Qdisc *sch,
976 struct sched_gate_list *sched,
977 ktime_t *start)
978 {
979 struct taprio_sched *q = qdisc_priv(sch);
980 ktime_t now, base, cycle;
981 s64 n;
982
983 base = sched_base_time(sched);
984 now = taprio_get_time(q);
985
986 if (ktime_after(base, now)) {
987 *start = base;
988 return 0;
989 }
990
991 cycle = sched->cycle_time;
992
993 /* The qdisc is expected to have at least one sched_entry. Moreover,
994 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
995 * something went really wrong. In that case, we should warn about this
996 * inconsistent state and return an error.
997 */
998 if (WARN_ON(!cycle))
999 return -EFAULT;
1000
1001 /* Schedule the start time for the beginning of the next
1002 * cycle.
1003 */
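/* As an illustration, with base = 0, cycle = 1ms and now = 2.5ms,
 * n = 2 and the start is pushed to 3ms, the next cycle boundary
 * after 'now'.
 */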
1004 n = div64_s64(ktime_sub_ns(now, base), cycle);
1005 *start = ktime_add_ns(base, (n + 1) * cycle);
1006 return 0;
1007 }
1008
1009 static void setup_first_close_time(struct taprio_sched *q,
1010 struct sched_gate_list *sched, ktime_t base)
1011 {
1012 struct sched_entry *first;
1013 ktime_t cycle;
1014
1015 first = list_first_entry(&sched->entries,
1016 struct sched_entry, list);
1017
1018 cycle = sched->cycle_time;
1019
1020 /* FIXME: find a better place to do this */
1021 sched->cycle_close_time = ktime_add_ns(base, cycle);
1022
1023 first->close_time = ktime_add_ns(base, first->interval);
1024 taprio_set_budget(q, first);
1025 rcu_assign_pointer(q->current_entry, NULL);
1026 }
1027
1028 static void taprio_start_sched(struct Qdisc *sch,
1029 ktime_t start, struct sched_gate_list *new)
1030 {
1031 struct taprio_sched *q = qdisc_priv(sch);
1032 ktime_t expires;
1033
1034 if (FULL_OFFLOAD_IS_ENABLED(q->flags))
1035 return;
1036
1037 expires = hrtimer_get_expires(&q->advance_timer);
1038 if (expires == 0)
1039 expires = KTIME_MAX;
1040
1041 /* If the new schedule starts before the next expiration, we
1042 * reprogram it to the earliest one, so we change the admin
1043 * schedule to the operational one at the right time.
1044 */
1045 start = min_t(ktime_t, start, expires);
1046
1047 hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
1048 }
1049
1050 static void taprio_set_picos_per_byte(struct net_device *dev,
1051 struct taprio_sched *q)
1052 {
1053 struct ethtool_link_ksettings ecmd;
1054 int speed = SPEED_10;
1055 int picos_per_byte;
1056 int err;
1057
1058 err = __ethtool_get_link_ksettings(dev, &ecmd);
1059 if (err < 0)
1060 goto skip;
1061
1062 if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
1063 speed = ecmd.base.speed;
1064
1065 skip:
1066 picos_per_byte = (USEC_PER_SEC * 8) / speed;
1067
1068 atomic64_set(&q->picos_per_byte, picos_per_byte);
1069 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
1070 dev->name, (long long)atomic64_read(&q->picos_per_byte),
1071 ecmd.base.speed);
1072 }
1073
1074 static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
1075 void *ptr)
1076 {
1077 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1078 struct net_device *qdev;
1079 struct taprio_sched *q;
1080 bool found = false;
1081
1082 ASSERT_RTNL();
1083
1084 if (event != NETDEV_UP && event != NETDEV_CHANGE)
1085 return NOTIFY_DONE;
1086
1087 spin_lock(&taprio_list_lock);
1088 list_for_each_entry(q, &taprio_list, taprio_list) {
1089 qdev = qdisc_dev(q->root);
1090 if (qdev == dev) {
1091 found = true;
1092 break;
1093 }
1094 }
1095 spin_unlock(&taprio_list_lock);
1096
1097 if (found)
1098 taprio_set_picos_per_byte(dev, q);
1099
1100 return NOTIFY_DONE;
1101 }
1102
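/* Pre-compute the first txtime of each entry relative to 'base': entry
 * i starts at 'base' plus the sum of the intervals of entries 0..i-1.
 */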
1103 static void setup_txtime(struct taprio_sched *q,
1104 struct sched_gate_list *sched, ktime_t base)
1105 {
1106 struct sched_entry *entry;
1107 u32 interval = 0;
1108
1109 list_for_each_entry(entry, &sched->entries, list) {
1110 entry->next_txtime = ktime_add_ns(base, interval);
1111 interval += entry->interval;
1112 }
1113 }
1114
1115 static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
1116 {
1117 struct __tc_taprio_qopt_offload *__offload;
1118
1119 __offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
1120 GFP_KERNEL);
1121 if (!__offload)
1122 return NULL;
1123
1124 refcount_set(&__offload->users, 1);
1125
1126 return &__offload->offload;
1127 }
1128
1129 struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
1130 *offload)
1131 {
1132 struct __tc_taprio_qopt_offload *__offload;
1133
1134 __offload = container_of(offload, struct __tc_taprio_qopt_offload,
1135 offload);
1136
1137 refcount_inc(&__offload->users);
1138
1139 return offload;
1140 }
1141 EXPORT_SYMBOL_GPL(taprio_offload_get);
1142
1143 void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
1144 {
1145 struct __tc_taprio_qopt_offload *__offload;
1146
1147 __offload = container_of(offload, struct __tc_taprio_qopt_offload,
1148 offload);
1149
1150 if (!refcount_dec_and_test(&__offload->users))
1151 return;
1152
1153 kfree(__offload);
1154 }
1155 EXPORT_SYMBOL_GPL(taprio_offload_free);
1156
1157 /* This function only serves to keep the pointers to the "oper" and "admin"
1158 * schedules valid in relation to their base times, so that when dump() is
1159 * called the user looks at the right schedules.
1160 * When using full offload, the admin configuration is promoted to oper at the
1161 * base_time in the PHC time domain. But because the system time is not
1162 * necessarily in sync with that, we can't just trigger a hrtimer to call
1163 * switch_schedules at the right hardware time.
1164 * At the moment we call this by hand right away from taprio, but in the future
1165 * it will be useful to create a mechanism for drivers to notify taprio of the
1166 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
1167 * This is left as TODO.
1168 */
1169 static void taprio_offload_config_changed(struct taprio_sched *q)
1170 {
1171 struct sched_gate_list *oper, *admin;
1172
1173 spin_lock(&q->current_entry_lock);
1174
1175 oper = rcu_dereference_protected(q->oper_sched,
1176 lockdep_is_held(&q->current_entry_lock));
1177 admin = rcu_dereference_protected(q->admin_sched,
1178 lockdep_is_held(&q->current_entry_lock));
1179
1180 switch_schedules(q, &admin, &oper);
1181
1182 spin_unlock(&q->current_entry_lock);
1183 }
1184
1185 static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
1186 {
1187 u32 i, queue_mask = 0;
1188
1189 for (i = 0; i < dev->num_tc; i++) {
1190 u32 offset, count;
1191
1192 if (!(tc_mask & BIT(i)))
1193 continue;
1194
1195 offset = dev->tc_to_txq[i].offset;
1196 count = dev->tc_to_txq[i].count;
1197
1198 queue_mask |= GENMASK(offset + count - 1, offset);
1199 }
1200
1201 return queue_mask;
1202 }
1203
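/* Convert a bitmask of traffic classes into a bitmask of TX queues,
 * using the device's tc_to_txq map. As an illustration, with TC 0
 * mapped to queues {0, 1} and TC 1 mapped to queues {2, 3}, a tc_mask
 * of 0x2 yields a queue_mask of 0xc.
 */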
1204 static void taprio_sched_to_offload(struct net_device *dev,
1205 struct sched_gate_list *sched,
1206 struct tc_taprio_qopt_offload *offload)
1207 {
1208 struct sched_entry *entry;
1209 int i = 0;
1210
1211 offload->base_time = sched->base_time;
1212 offload->cycle_time = sched->cycle_time;
1213 offload->cycle_time_extension = sched->cycle_time_extension;
1214
1215 list_for_each_entry(entry, &sched->entries, list) {
1216 struct tc_taprio_sched_entry *e = &offload->entries[i];
1217
1218 e->command = entry->command;
1219 e->interval = entry->interval;
1220 e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
1221
1222 i++;
1223 }
1224
1225 offload->num_entries = i;
1226 }
1227
1228 static int taprio_enable_offload(struct net_device *dev,
1229 struct taprio_sched *q,
1230 struct sched_gate_list *sched,
1231 struct netlink_ext_ack *extack)
1232 {
1233 const struct net_device_ops *ops = dev->netdev_ops;
1234 struct tc_taprio_qopt_offload *offload;
1235 int err = 0;
1236
1237 if (!ops->ndo_setup_tc) {
1238 NL_SET_ERR_MSG(extack,
1239 "Device does not support taprio offload");
1240 return -EOPNOTSUPP;
1241 }
1242
1243 offload = taprio_offload_alloc(sched->num_entries);
1244 if (!offload) {
1245 NL_SET_ERR_MSG(extack,
1246 "Not enough memory for enabling offload mode");
1247 return -ENOMEM;
1248 }
1249 offload->enable = 1;
1250 taprio_sched_to_offload(dev, sched, offload);
1251
1252 err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
1253 if (err < 0) {
1254 NL_SET_ERR_MSG(extack,
1255 "Device failed to setup taprio offload");
1256 goto done;
1257 }
1258
1259 done:
1260 taprio_offload_free(offload);
1261
1262 return err;
1263 }
1264
1265 static int taprio_disable_offload(struct net_device *dev,
1266 struct taprio_sched *q,
1267 struct netlink_ext_ack *extack)
1268 {
1269 const struct net_device_ops *ops = dev->netdev_ops;
1270 struct tc_taprio_qopt_offload *offload;
1271 int err;
1272
1273 if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
1274 return 0;
1275
1276 if (!ops->ndo_setup_tc)
1277 return -EOPNOTSUPP;
1278
1279 offload = taprio_offload_alloc(0);
1280 if (!offload) {
1281 NL_SET_ERR_MSG(extack,
1282 "Not enough memory to disable offload mode");
1283 return -ENOMEM;
1284 }
1285 offload->enable = 0;
1286
1287 err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
1288 if (err < 0) {
1289 NL_SET_ERR_MSG(extack,
1290 "Device failed to disable offload");
1291 goto out;
1292 }
1293
1294 out:
1295 taprio_offload_free(offload);
1296
1297 return err;
1298 }
1299
1300 /* If full offload is enabled, the only possible clockid is the net device's
1301 * PHC. For that reason, specifying a clockid through netlink is incorrect.
1302 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
1303 * in sync with the specified clockid via a user space daemon such as phc2sys.
1304 * For both software taprio and txtime-assist, the clockid is used for the
1305 * hrtimer that advances the schedule and is hence mandatory.
1306 */
1307 static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
1308 struct netlink_ext_ack *extack)
1309 {
1310 struct taprio_sched *q = qdisc_priv(sch);
1311 struct net_device *dev = qdisc_dev(sch);
1312 int err = -EINVAL;
1313
1314 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1315 const struct ethtool_ops *ops = dev->ethtool_ops;
1316 struct ethtool_ts_info info = {
1317 .cmd = ETHTOOL_GET_TS_INFO,
1318 .phc_index = -1,
1319 };
1320
1321 if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1322 NL_SET_ERR_MSG(extack,
1323 "The 'clockid' cannot be specified for full offload");
1324 goto out;
1325 }
1326
1327 if (ops && ops->get_ts_info)
1328 err = ops->get_ts_info(dev, &info);
1329
1330 if (err || info.phc_index < 0) {
1331 NL_SET_ERR_MSG(extack,
1332 "Device does not have a PTP clock");
1333 err = -ENOTSUPP;
1334 goto out;
1335 }
1336 } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1337 int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
1338
1339 /* We only support static clockids and we don't allow
1340 * them to be modified after the first init.
1341 */
1342 if (clockid < 0 ||
1343 (q->clockid != -1 && q->clockid != clockid)) {
1344 NL_SET_ERR_MSG(extack,
1345 "Changing the 'clockid' of a running schedule is not supported");
1346 err = -ENOTSUPP;
1347 goto out;
1348 }
1349
1350 switch (clockid) {
1351 case CLOCK_REALTIME:
1352 q->tk_offset = TK_OFFS_REAL;
1353 break;
1354 case CLOCK_MONOTONIC:
1355 q->tk_offset = TK_OFFS_MAX;
1356 break;
1357 case CLOCK_BOOTTIME:
1358 q->tk_offset = TK_OFFS_BOOT;
1359 break;
1360 case CLOCK_TAI:
1361 q->tk_offset = TK_OFFS_TAI;
1362 break;
1363 default:
1364 NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
1365 err = -EINVAL;
1366 goto out;
1367 }
1368
1369 q->clockid = clockid;
1370 } else {
1371 NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
1372 goto out;
1373 }
1374
1375 /* Everything went ok, return success. */
1376 err = 0;
1377
1378 out:
1379 return err;
1380 }
1381
1382 static int taprio_mqprio_cmp(const struct net_device *dev,
1383 const struct tc_mqprio_qopt *mqprio)
1384 {
1385 int i;
1386
1387 if (!mqprio || mqprio->num_tc != dev->num_tc)
1388 return -1;
1389
1390 for (i = 0; i < mqprio->num_tc; i++)
1391 if (dev->tc_to_txq[i].count != mqprio->count[i] ||
1392 dev->tc_to_txq[i].offset != mqprio->offset[i])
1393 return -1;
1394
1395 for (i = 0; i <= TC_BITMASK; i++)
1396 if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
1397 return -1;
1398
1399 return 0;
1400 }
1401
1402 /* The semantics of the 'flags' argument in relation to 'change()'
1403 * requests are interpreted following two rules (which are applied in
1404 * this order): (1) an omitted 'flags' argument is interpreted as
1405 * zero; (2) the 'flags' of a "running" taprio instance cannot be
1406 * changed.
1407 */
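/* As an illustration (flag bit values as defined in
 * include/uapi/linux/pkt_sched.h: TXTIME_ASSIST is bit 0 and
 * FULL_OFFLOAD is bit 1), a command along the lines of
 *   tc qdisc replace dev eth0 parent root taprio ... flags 0x1
 * requests txtime-assist mode, while omitting 'flags' altogether is
 * treated as flags 0.
 */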
1408 static int taprio_new_flags(const struct nlattr *attr, u32 old,
1409 struct netlink_ext_ack *extack)
1410 {
1411 u32 new = 0;
1412
1413 if (attr)
1414 new = nla_get_u32(attr);
1415
1416 if (old != TAPRIO_FLAGS_INVALID && old != new) {
1417 NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1418 return -EOPNOTSUPP;
1419 }
1420
1421 if (!taprio_flags_valid(new)) {
1422 NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1423 return -EINVAL;
1424 }
1425
1426 return new;
1427 }
1428
1429 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
1430 struct netlink_ext_ack *extack)
1431 {
1432 struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
1433 struct sched_gate_list *oper, *admin, *new_admin;
1434 struct taprio_sched *q = qdisc_priv(sch);
1435 struct net_device *dev = qdisc_dev(sch);
1436 struct tc_mqprio_qopt *mqprio = NULL;
1437 unsigned long flags;
1438 ktime_t start;
1439 int i, err;
1440
1441 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
1442 taprio_policy, extack);
1443 if (err < 0)
1444 return err;
1445
1446 if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
1447 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
1448
1449 err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
1450 q->flags, extack);
1451 if (err < 0)
1452 return err;
1453
1454 q->flags = err;
1455
1456 err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
1457 if (err < 0)
1458 return err;
1459
1460 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1461 if (!new_admin) {
1462 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1463 return -ENOMEM;
1464 }
1465 INIT_LIST_HEAD(&new_admin->entries);
1466
1467 rcu_read_lock();
1468 oper = rcu_dereference(q->oper_sched);
1469 admin = rcu_dereference(q->admin_sched);
1470 rcu_read_unlock();
1471
1472 /* no changes - no new mqprio settings */
1473 if (!taprio_mqprio_cmp(dev, mqprio))
1474 mqprio = NULL;
1475
1476 if (mqprio && (oper || admin)) {
1477 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1478 err = -ENOTSUPP;
1479 goto free_sched;
1480 }
1481
1482 err = parse_taprio_schedule(q, tb, new_admin, extack);
1483 if (err < 0)
1484 goto free_sched;
1485
1486 if (new_admin->num_entries == 0) {
1487 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
1488 err = -EINVAL;
1489 goto free_sched;
1490 }
1491
1492 err = taprio_parse_clockid(sch, tb, extack);
1493 if (err < 0)
1494 goto free_sched;
1495
1496 taprio_set_picos_per_byte(dev, q);
1497
1498 if (mqprio) {
1499 netdev_set_num_tc(dev, mqprio->num_tc);
1500 for (i = 0; i < mqprio->num_tc; i++)
1501 netdev_set_tc_queue(dev, i,
1502 mqprio->count[i],
1503 mqprio->offset[i]);
1504
1505 /* Always use supplied priority mappings */
1506 for (i = 0; i <= TC_BITMASK; i++)
1507 netdev_set_prio_tc_map(dev, i,
1508 mqprio->prio_tc_map[i]);
1509 }
1510
1511 if (FULL_OFFLOAD_IS_ENABLED(q->flags))
1512 err = taprio_enable_offload(dev, q, new_admin, extack);
1513 else
1514 err = taprio_disable_offload(dev, q, extack);
1515 if (err)
1516 goto free_sched;
1517
1518 /* Protects against enqueue()/dequeue() */
1519 spin_lock_bh(qdisc_lock(sch));
1520
1521 if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
1522 if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
1523 NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
1524 err = -EINVAL;
1525 goto unlock;
1526 }
1527
1528 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
1529 }
1530
1531 if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
1532 !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
1533 !hrtimer_active(&q->advance_timer)) {
1534 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1535 q->advance_timer.function = advance_sched;
1536 }
1537
1538 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1539 q->dequeue = taprio_dequeue_offload;
1540 q->peek = taprio_peek_offload;
1541 } else {
1542 /* Be sure to always keep the function pointers
1543 * in a consistent state.
1544 */
1545 q->dequeue = taprio_dequeue_soft;
1546 q->peek = taprio_peek_soft;
1547 }
1548
1549 err = taprio_get_start_time(sch, new_admin, &start);
1550 if (err < 0) {
1551 NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
1552 goto unlock;
1553 }
1554
1555 setup_txtime(q, new_admin, start);
1556
1557 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
1558 if (!oper) {
1559 rcu_assign_pointer(q->oper_sched, new_admin);
1560 err = 0;
1561 new_admin = NULL;
1562 goto unlock;
1563 }
1564
1565 rcu_assign_pointer(q->admin_sched, new_admin);
1566 if (admin)
1567 call_rcu(&admin->rcu, taprio_free_sched_cb);
1568 } else {
1569 setup_first_close_time(q, new_admin, start);
1570
1571 /* Protects against advance_sched() */
1572 spin_lock_irqsave(&q->current_entry_lock, flags);
1573
1574 taprio_start_sched(sch, start, new_admin);
1575
1576 rcu_assign_pointer(q->admin_sched, new_admin);
1577 if (admin)
1578 call_rcu(&admin->rcu, taprio_free_sched_cb);
1579
1580 spin_unlock_irqrestore(&q->current_entry_lock, flags);
1581
1582 if (FULL_OFFLOAD_IS_ENABLED(q->flags))
1583 taprio_offload_config_changed(q);
1584 }
1585
1586 new_admin = NULL;
1587 err = 0;
1588
1589 unlock:
1590 spin_unlock_bh(qdisc_lock(sch));
1591
1592 free_sched:
1593 if (new_admin)
1594 call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1595
1596 return err;
1597 }
1598
1599 static void taprio_destroy(struct Qdisc *sch)
1600 {
1601 struct taprio_sched *q = qdisc_priv(sch);
1602 struct net_device *dev = qdisc_dev(sch);
1603 unsigned int i;
1604
1605 spin_lock(&taprio_list_lock);
1606 list_del(&q->taprio_list);
1607 spin_unlock(&taprio_list_lock);
1608
1609 hrtimer_cancel(&q->advance_timer);
1610
1611 taprio_disable_offload(dev, q, NULL);
1612
1613 if (q->qdiscs) {
1614 for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
1615 qdisc_put(q->qdiscs[i]);
1616
1617 kfree(q->qdiscs);
1618 }
1619 q->qdiscs = NULL;
1620
1621 netdev_reset_tc(dev);
1622
1623 if (q->oper_sched)
1624 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
1625
1626 if (q->admin_sched)
1627 call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
1628 }
1629
1630 static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1631 struct netlink_ext_ack *extack)
1632 {
1633 struct taprio_sched *q = qdisc_priv(sch);
1634 struct net_device *dev = qdisc_dev(sch);
1635 int i;
1636
1637 spin_lock_init(&q->current_entry_lock);
1638
1639 hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
1640 q->advance_timer.function = advance_sched;
1641
1642 q->dequeue = taprio_dequeue_soft;
1643 q->peek = taprio_peek_soft;
1644
1645 q->root = sch;
1646
1647 /* We only support static clockids. Use an invalid value as default
1648 * and get the valid one on taprio_change().
1649 */
1650 q->clockid = -1;
1651 q->flags = TAPRIO_FLAGS_INVALID;
1652
1653 spin_lock(&taprio_list_lock);
1654 list_add(&q->taprio_list, &taprio_list);
1655 spin_unlock(&taprio_list_lock);
1656
1657 if (sch->parent != TC_H_ROOT)
1658 return -EOPNOTSUPP;
1659
1660 if (!netif_is_multiqueue(dev))
1661 return -EOPNOTSUPP;
1662
1663 /* pre-allocate qdisc, attachment can't fail */
1664 q->qdiscs = kcalloc(dev->num_tx_queues,
1665 sizeof(q->qdiscs[0]),
1666 GFP_KERNEL);
1667
1668 if (!q->qdiscs)
1669 return -ENOMEM;
1670
1671 if (!opt)
1672 return -EINVAL;
1673
1674 for (i = 0; i < dev->num_tx_queues; i++) {
1675 struct netdev_queue *dev_queue;
1676 struct Qdisc *qdisc;
1677
1678 dev_queue = netdev_get_tx_queue(dev, i);
1679 qdisc = qdisc_create_dflt(dev_queue,
1680 &pfifo_qdisc_ops,
1681 TC_H_MAKE(TC_H_MAJ(sch->handle),
1682 TC_H_MIN(i + 1)),
1683 extack);
1684 if (!qdisc)
1685 return -ENOMEM;
1686
1687 if (i < dev->real_num_tx_queues)
1688 qdisc_hash_add(qdisc, false);
1689
1690 q->qdiscs[i] = qdisc;
1691 }
1692
1693 return taprio_change(sch, opt, extack);
1694 }
1695
1696 static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
1697 unsigned long cl)
1698 {
1699 struct net_device *dev = qdisc_dev(sch);
1700 unsigned long ntx = cl - 1;
1701
1702 if (ntx >= dev->num_tx_queues)
1703 return NULL;
1704
1705 return netdev_get_tx_queue(dev, ntx);
1706 }
1707
1708 static int taprio_graft(struct Qdisc *sch, unsigned long cl,
1709 struct Qdisc *new, struct Qdisc **old,
1710 struct netlink_ext_ack *extack)
1711 {
1712 struct taprio_sched *q = qdisc_priv(sch);
1713 struct net_device *dev = qdisc_dev(sch);
1714 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1715
1716 if (!dev_queue)
1717 return -EINVAL;
1718
1719 if (dev->flags & IFF_UP)
1720 dev_deactivate(dev);
1721
1722 *old = q->qdiscs[cl - 1];
1723 q->qdiscs[cl - 1] = new;
1724
1725 if (new)
1726 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1727
1728 if (dev->flags & IFF_UP)
1729 dev_activate(dev);
1730
1731 return 0;
1732 }
1733
1734 static int dump_entry(struct sk_buff *msg,
1735 const struct sched_entry *entry)
1736 {
1737 struct nlattr *item;
1738
1739 item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
1740 if (!item)
1741 return -ENOSPC;
1742
1743 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
1744 goto nla_put_failure;
1745
1746 if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
1747 goto nla_put_failure;
1748
1749 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
1750 entry->gate_mask))
1751 goto nla_put_failure;
1752
1753 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
1754 entry->interval))
1755 goto nla_put_failure;
1756
1757 return nla_nest_end(msg, item);
1758
1759 nla_put_failure:
1760 nla_nest_cancel(msg, item);
1761 return -1;
1762 }
1763
1764 static int dump_schedule(struct sk_buff *msg,
1765 const struct sched_gate_list *root)
1766 {
1767 struct nlattr *entry_list;
1768 struct sched_entry *entry;
1769
1770 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
1771 root->base_time, TCA_TAPRIO_PAD))
1772 return -1;
1773
1774 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
1775 root->cycle_time, TCA_TAPRIO_PAD))
1776 return -1;
1777
1778 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
1779 root->cycle_time_extension, TCA_TAPRIO_PAD))
1780 return -1;
1781
1782 entry_list = nla_nest_start_noflag(msg,
1783 TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
1784 if (!entry_list)
1785 goto error_nest;
1786
1787 list_for_each_entry(entry, &root->entries, list) {
1788 if (dump_entry(msg, entry) < 0)
1789 goto error_nest;
1790 }
1791
1792 nla_nest_end(msg, entry_list);
1793 return 0;
1794
1795 error_nest:
1796 nla_nest_cancel(msg, entry_list);
1797 return -1;
1798 }
1799
1800 static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
1801 {
1802 struct taprio_sched *q = qdisc_priv(sch);
1803 struct net_device *dev = qdisc_dev(sch);
1804 struct sched_gate_list *oper, *admin;
1805 struct tc_mqprio_qopt opt = { 0 };
1806 struct nlattr *nest, *sched_nest;
1807 unsigned int i;
1808
1809 rcu_read_lock();
1810 oper = rcu_dereference(q->oper_sched);
1811 admin = rcu_dereference(q->admin_sched);
1812
1813 opt.num_tc = netdev_get_num_tc(dev);
1814 memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
1815
1816 for (i = 0; i < netdev_get_num_tc(dev); i++) {
1817 opt.count[i] = dev->tc_to_txq[i].count;
1818 opt.offset[i] = dev->tc_to_txq[i].offset;
1819 }
1820
1821 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1822 if (!nest)
1823 goto start_error;
1824
1825 if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
1826 goto options_error;
1827
1828 if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
1829 nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
1830 goto options_error;
1831
1832 if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
1833 goto options_error;
1834
1835 if (q->txtime_delay &&
1836 nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
1837 goto options_error;
1838
1839 if (oper && dump_schedule(skb, oper))
1840 goto options_error;
1841
1842 if (!admin)
1843 goto done;
1844
1845 sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
1846 if (!sched_nest)
1847 goto options_error;
1848
1849 if (dump_schedule(skb, admin))
1850 goto admin_error;
1851
1852 nla_nest_end(skb, sched_nest);
1853
1854 done:
1855 rcu_read_unlock();
1856
1857 return nla_nest_end(skb, nest);
1858
1859 admin_error:
1860 nla_nest_cancel(skb, sched_nest);
1861
1862 options_error:
1863 nla_nest_cancel(skb, nest);
1864
1865 start_error:
1866 rcu_read_unlock();
1867 return -ENOSPC;
1868 }
1869
1870 static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
1871 {
1872 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1873
1874 if (!dev_queue)
1875 return NULL;
1876
1877 return dev_queue->qdisc_sleeping;
1878 }
1879
1880 static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
1881 {
1882 unsigned int ntx = TC_H_MIN(classid);
1883
1884 if (!taprio_queue_get(sch, ntx))
1885 return 0;
1886 return ntx;
1887 }
1888
1889 static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
1890 struct sk_buff *skb, struct tcmsg *tcm)
1891 {
1892 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1893
1894 tcm->tcm_parent = TC_H_ROOT;
1895 tcm->tcm_handle |= TC_H_MIN(cl);
1896 tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
1897
1898 return 0;
1899 }
1900
1901 static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1902 struct gnet_dump *d)
1903 __releases(d->lock)
1904 __acquires(d->lock)
1905 {
1906 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1907
1908 sch = dev_queue->qdisc_sleeping;
1909 if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
1910 qdisc_qstats_copy(d, sch) < 0)
1911 return -1;
1912 return 0;
1913 }
1914
1915 static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1916 {
1917 struct net_device *dev = qdisc_dev(sch);
1918 unsigned long ntx;
1919
1920 if (arg->stop)
1921 return;
1922
1923 arg->count = arg->skip;
1924 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
1925 if (arg->fn(sch, ntx + 1, arg) < 0) {
1926 arg->stop = 1;
1927 break;
1928 }
1929 arg->count++;
1930 }
1931 }
1932
1933 static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
1934 struct tcmsg *tcm)
1935 {
1936 return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
1937 }
1938
1939 static const struct Qdisc_class_ops taprio_class_ops = {
1940 .graft = taprio_graft,
1941 .leaf = taprio_leaf,
1942 .find = taprio_find,
1943 .walk = taprio_walk,
1944 .dump = taprio_dump_class,
1945 .dump_stats = taprio_dump_class_stats,
1946 .select_queue = taprio_select_queue,
1947 };
1948
1949 static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
1950 .cl_ops = &taprio_class_ops,
1951 .id = "taprio",
1952 .priv_size = sizeof(struct taprio_sched),
1953 .init = taprio_init,
1954 .change = taprio_change,
1955 .destroy = taprio_destroy,
1956 .peek = taprio_peek,
1957 .dequeue = taprio_dequeue,
1958 .enqueue = taprio_enqueue,
1959 .dump = taprio_dump,
1960 .owner = THIS_MODULE,
1961 };
1962
1963 static struct notifier_block taprio_device_notifier = {
1964 .notifier_call = taprio_dev_notifier,
1965 };
1966
1967 static int __init taprio_module_init(void)
1968 {
1969 int err = register_netdevice_notifier(&taprio_device_notifier);
1970
1971 if (err)
1972 return err;
1973
1974 return register_qdisc(&taprio_qdisc_ops);
1975 }
1976
1977 static void __exit taprio_module_exit(void)
1978 {
1979 unregister_qdisc(&taprio_qdisc_ops);
1980 unregister_netdevice_notifier(&taprio_device_notifier);
1981 }
1982
1983 module_init(taprio_module_init);
1984 module_exit(taprio_module_exit);
1985 MODULE_LICENSE("GPL");
1986