Skip to content

Commit 1351170

Browse files
Yackoudavem330
authored and committed
net: taprio offload: enforce qdisc to netdev queue mapping
Even though the taprio qdisc is designed for multiqueue devices, all the queues still point to the same top-level taprio qdisc. This works and is probably required for software taprio, but at least with offload taprio, it has an undesirable side effect: because the whole qdisc is run when a packet has to be sent, it allows packets in a best-effort class to be processed in the context of a task sending higher priority traffic. If there are packets left in the qdisc after that first run, the NET_TX softirq is raised and gets executed immediately in the same process context. As with any other softirq, it runs up to 10 times and for up to 2ms, during which the calling process is waiting for the sendmsg call (or similar) to return. In my use case, that calling process is a real-time task scheduled to send a packet every 2ms, so the long sendmsg calls are leading to missed timeslots. By attaching each netdev queue to its own qdisc, as it is done with the "classic" mq qdisc, each traffic class can be processed independently without touching the other classes. A high-priority process can then send packets without getting stuck in the sendmsg call anymore. Signed-off-by: Yannick Vignon <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent d8654f4 commit 1351170

File tree

1 file changed

+45
-40
lines changed

1 file changed

+45
-40
lines changed

net/sched/sch_taprio.c

Lines changed: 45 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -435,6 +435,11 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
435435
struct Qdisc *child;
436436
int queue;
437437

438+
if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) {
439+
WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n");
440+
return qdisc_drop(skb, sch, to_free);
441+
}
442+
438443
queue = skb_get_queue_mapping(skb);
439444

440445
child = q->qdiscs[queue];
@@ -526,23 +531,7 @@ static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
526531

527532
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
528533
{
529-
struct taprio_sched *q = qdisc_priv(sch);
530-
struct net_device *dev = qdisc_dev(sch);
531-
struct sk_buff *skb;
532-
int i;
533-
534-
for (i = 0; i < dev->num_tx_queues; i++) {
535-
struct Qdisc *child = q->qdiscs[i];
536-
537-
if (unlikely(!child))
538-
continue;
539-
540-
skb = child->ops->peek(child);
541-
if (!skb)
542-
continue;
543-
544-
return skb;
545-
}
534+
WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n");
546535

547536
return NULL;
548537
}
@@ -651,27 +640,7 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
651640

652641
static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
653642
{
654-
struct taprio_sched *q = qdisc_priv(sch);
655-
struct net_device *dev = qdisc_dev(sch);
656-
struct sk_buff *skb;
657-
int i;
658-
659-
for (i = 0; i < dev->num_tx_queues; i++) {
660-
struct Qdisc *child = q->qdiscs[i];
661-
662-
if (unlikely(!child))
663-
continue;
664-
665-
skb = child->ops->dequeue(child);
666-
if (unlikely(!skb))
667-
continue;
668-
669-
qdisc_bstats_update(sch, skb);
670-
qdisc_qstats_backlog_dec(sch, skb);
671-
sch->q.qlen--;
672-
673-
return skb;
674-
}
643+
WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n");
675644

676645
return NULL;
677646
}
@@ -1756,6 +1725,37 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
17561725
return taprio_change(sch, opt, extack);
17571726
}
17581727

1728+
static void taprio_attach(struct Qdisc *sch)
1729+
{
1730+
struct taprio_sched *q = qdisc_priv(sch);
1731+
struct net_device *dev = qdisc_dev(sch);
1732+
unsigned int ntx;
1733+
1734+
/* Attach underlying qdisc */
1735+
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1736+
struct Qdisc *qdisc = q->qdiscs[ntx];
1737+
struct Qdisc *old;
1738+
1739+
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1740+
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1741+
old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1742+
if (ntx < dev->real_num_tx_queues)
1743+
qdisc_hash_add(qdisc, false);
1744+
} else {
1745+
old = dev_graft_qdisc(qdisc->dev_queue, sch);
1746+
qdisc_refcount_inc(sch);
1747+
}
1748+
if (old)
1749+
qdisc_put(old);
1750+
}
1751+
1752+
/* access to the child qdiscs is not needed in offload mode */
1753+
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1754+
kfree(q->qdiscs);
1755+
q->qdiscs = NULL;
1756+
}
1757+
}
1758+
17591759
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
17601760
unsigned long cl)
17611761
{
@@ -1782,8 +1782,12 @@ static int taprio_graft(struct Qdisc *sch, unsigned long cl,
17821782
if (dev->flags & IFF_UP)
17831783
dev_deactivate(dev);
17841784

1785-
*old = q->qdiscs[cl - 1];
1786-
q->qdiscs[cl - 1] = new;
1785+
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1786+
*old = dev_graft_qdisc(dev_queue, new);
1787+
} else {
1788+
*old = q->qdiscs[cl - 1];
1789+
q->qdiscs[cl - 1] = new;
1790+
}
17871791

17881792
if (new)
17891793
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
@@ -2017,6 +2021,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
20172021
.change = taprio_change,
20182022
.destroy = taprio_destroy,
20192023
.reset = taprio_reset,
2024+
.attach = taprio_attach,
20202025
.peek = taprio_peek,
20212026
.dequeue = taprio_dequeue,
20222027
.enqueue = taprio_enqueue,

0 commit comments

Comments
 (0)