/Linux-v5.4/drivers/s390/cio/
D | cio.c
     88  int cio_set_options(struct subchannel *sch, int flags)  in cio_set_options() argument
     90  struct io_subchannel_private *priv = to_io_private(sch);  in cio_set_options()
     99  cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)  in cio_start_handle_notoper() argument
    104  sch->lpm &= ~lpm;  in cio_start_handle_notoper()
    106  sch->lpm = 0;  in cio_start_handle_notoper()
    109  "subchannel 0.%x.%04x!\n", sch->schid.ssid,  in cio_start_handle_notoper()
    110  sch->schid.sch_no);  in cio_start_handle_notoper()
    112  if (cio_update_schib(sch))  in cio_start_handle_notoper()
    115  sprintf(dbf_text, "no%s", dev_name(&sch->dev));  in cio_start_handle_notoper()
    117  CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));  in cio_start_handle_notoper()
    [all …]

D | eadm_sch.c
     59  static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)  in eadm_subchannel_start() argument
     61  union orb *orb = &get_eadm_private(sch)->orb;  in eadm_subchannel_start()
     66  orb->eadm.intparm = (u32)(addr_t)sch;  in eadm_subchannel_start()
     70  EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));  in eadm_subchannel_start()
     72  cc = ssch(sch->schid, orb);  in eadm_subchannel_start()
     75  sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;  in eadm_subchannel_start()
     86  static int eadm_subchannel_clear(struct subchannel *sch)  in eadm_subchannel_clear() argument
     90  cc = csch(sch->schid);  in eadm_subchannel_clear()
     94  sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;  in eadm_subchannel_clear()
    101  struct subchannel *sch = private->sch;  in eadm_subchannel_timeout() local
    [all …]

D | css.c
     72  struct subchannel *sch = to_subchannel(dev);  in call_fn_known_sch() local
     77  idset_sch_del(cb->set, sch->schid);  in call_fn_known_sch()
     79  rc = cb->fn_known_sch(sch, cb->data);  in call_fn_known_sch()
     96  struct subchannel *sch;  in call_fn_all_sch() local
     99  sch = get_subchannel_by_schid(schid);  in call_fn_all_sch()
    100  if (sch) {  in call_fn_all_sch()
    102  rc = cb->fn_known_sch(sch, cb->data);  in call_fn_all_sch()
    103  put_device(&sch->dev);  in call_fn_all_sch()
    152  static int css_sch_create_locks(struct subchannel *sch)  in css_sch_create_locks() argument
    154  sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);  in css_sch_create_locks()
    [all …]

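The call_fn_all_sch() fragment above shows the reference-counting convention around subchannel lookups: get_subchannel_by_schid() hands back the subchannel with a device reference held, and the caller releases it with put_device() when done. A minimal sketch of that get/use/put shape, assuming only the behaviour visible in the listing (the idset bookkeeping and callback plumbing of the real css.c helpers are omitted):

/* Sketch of the lookup/use/release pattern from call_fn_all_sch().
 * Assumption: get_subchannel_by_schid() returns NULL or a subchannel
 * whose embedded struct device carries a reference for the caller.
 */
static int use_subchannel_sketch(struct subchannel_id schid,
                                 int (*fn)(struct subchannel *, void *),
                                 void *data)
{
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                rc = fn(sch, data);        /* operate on the known subchannel */
                put_device(&sch->dev);     /* drop the lookup reference */
        }
        return rc;
}
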
D | device.c
    152  static int io_subchannel_prepare(struct subchannel *sch)  in io_subchannel_prepare() argument
    159  cdev = sch_get_cdev(sch);  in io_subchannel_prepare()
    287  struct subchannel *sch;  in ccw_device_set_offline() local
    301  sch = to_subchannel(cdev->dev.parent);  in ccw_device_set_offline()
    322  io_subchannel_quiesce(sch);  in ccw_device_set_offline()
    554  struct subchannel *sch;  in available_show() local
    564  sch = to_subchannel(dev->parent);  in available_show()
    565  if (!sch->lpm)  in available_show()
    579  struct subchannel *sch = to_subchannel(dev);  in initiate_logging() local
    582  rc = chsc_siosl(sch->schid);  in initiate_logging()
    [all …]

D | vfio_ccw_drv.c
     36  int vfio_ccw_sch_quiesce(struct subchannel *sch)  in vfio_ccw_sch_quiesce() argument
     38  struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);  in vfio_ccw_sch_quiesce()
     42  spin_lock_irq(sch->lock);  in vfio_ccw_sch_quiesce()
     43  if (!sch->schib.pmcw.ena)  in vfio_ccw_sch_quiesce()
     45  ret = cio_disable_subchannel(sch);  in vfio_ccw_sch_quiesce()
     52  ret = cio_cancel_halt_clear(sch, &iretry);  in vfio_ccw_sch_quiesce()
     56  sch->schid.ssid, sch->schid.sch_no);  in vfio_ccw_sch_quiesce()
     65  spin_unlock_irq(sch->lock);  in vfio_ccw_sch_quiesce()
     72  spin_lock_irq(sch->lock);  in vfio_ccw_sch_quiesce()
     73  ret = cio_disable_subchannel(sch);  in vfio_ccw_sch_quiesce()
    [all …]

D | vfio_ccw_fsm.c
     23  struct subchannel *sch;  in fsm_io_helper() local
     30  sch = private->sch;  in fsm_io_helper()
     32  spin_lock_irqsave(sch->lock, flags);  in fsm_io_helper()
     34  orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);  in fsm_io_helper()
     41  VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));  in fsm_io_helper()
     44  ccode = ssch(sch->schid, orb);  in fsm_io_helper()
     53  sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;  in fsm_io_helper()
     65  sch->lpm &= ~lpm;  in fsm_io_helper()
     67  sch->lpm = 0;  in fsm_io_helper()
     69  if (cio_update_schib(sch))  in fsm_io_helper()
    [all …]

/Linux-v5.4/drivers/gpio/ |
D | gpio-sch.c
     29  static unsigned sch_gpio_offset(struct sch_gpio *sch, unsigned gpio,  in sch_gpio_offset() argument
     34  if (gpio >= sch->resume_base) {  in sch_gpio_offset()
     35  gpio -= sch->resume_base;  in sch_gpio_offset()
     42  static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)  in sch_gpio_bit() argument
     44  if (gpio >= sch->resume_base)  in sch_gpio_bit()
     45  gpio -= sch->resume_base;  in sch_gpio_bit()
     49  static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)  in sch_gpio_reg_get() argument
     54  offset = sch_gpio_offset(sch, gpio, reg);  in sch_gpio_reg_get()
     55  bit = sch_gpio_bit(sch, gpio);  in sch_gpio_reg_get()
     57  reg_val = !!(inb(sch->iobase + offset) & BIT(bit));  in sch_gpio_reg_get()
    [all …]

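The gpio-sch helpers above translate a GPIO number into a register offset and bit position (folding the resume bank back onto the core bank via resume_base), then read the legacy I/O port with inb(). A condensed sketch of that read path, reusing only the helpers and fields shown in the listing (the real driver also serializes register access with a lock and implements the matching outb() write path):

/* Sketch of the offset/bit/inb() read path from sch_gpio_reg_get(). */
static int sch_gpio_reg_get_sketch(struct sch_gpio *sch, unsigned int gpio,
                                   unsigned int reg)
{
        unsigned short offset = sch_gpio_offset(sch, gpio, reg); /* which register */
        unsigned short bit = sch_gpio_bit(sch, gpio);            /* which bit in it */

        /* read the byte-wide register and test this GPIO's bit */
        return !!(inb(sch->iobase + offset) & BIT(bit));
}
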
/Linux-v5.4/net/sched/ |
D | sch_mq.c
     24  static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)  in mq_offload() argument
     26  struct net_device *dev = qdisc_dev(sch);  in mq_offload()
     29  .handle = sch->handle,  in mq_offload()
     38  static int mq_offload_stats(struct Qdisc *sch)  in mq_offload_stats() argument
     42  .handle = sch->handle,  in mq_offload_stats()
     44  .bstats = &sch->bstats,  in mq_offload_stats()
     45  .qstats = &sch->qstats,  in mq_offload_stats()
     49  return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);  in mq_offload_stats()
     52  static void mq_destroy(struct Qdisc *sch)  in mq_destroy() argument
     54  struct net_device *dev = qdisc_dev(sch);  in mq_destroy()
    [all …]

D | sch_prio.c
     31  prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)  in prio_classify() argument
     33  struct prio_sched_data *q = qdisc_priv(sch);  in prio_classify()
     40  if (TC_H_MAJ(skb->priority) != sch->handle) {  in prio_classify()
     69  prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)  in prio_enqueue() argument
     75  qdisc = prio_classify(skb, sch, &ret);  in prio_enqueue()
     80  qdisc_qstats_drop(sch);  in prio_enqueue()
     88  sch->qstats.backlog += len;  in prio_enqueue()
     89  sch->q.qlen++;  in prio_enqueue()
     93  qdisc_qstats_drop(sch);  in prio_enqueue()
     97  static struct sk_buff *prio_peek(struct Qdisc *sch)  in prio_peek() argument
    [all …]

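prio_enqueue() follows the usual classful-qdisc enqueue pattern: classify the skb to one band's child qdisc, enqueue it there, and mirror the outcome in the parent's own backlog, qlen and drop counters. A rough sketch of that accounting, written against the generic helpers from include/net/sch_generic.h; prio_classify_sketch() is a hypothetical stand-in for the real classifier (its TC_H_MAJ/tcf filter logic is not reproduced):

/* Sketch of the classify-then-enqueue accounting seen in prio_enqueue(). */
static int prio_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
{
        unsigned int len = qdisc_pkt_len(skb);
        struct Qdisc *qdisc;
        int ret;

        qdisc = prio_classify_sketch(skb, sch, &ret);   /* pick the band's child */
        if (!qdisc) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);         /* count it against the parent */
                __qdisc_drop(skb, to_free);
                return ret;
        }

        ret = qdisc_enqueue(skb, qdisc, to_free);       /* hand off to the child */
        if (ret == NET_XMIT_SUCCESS) {
                sch->qstats.backlog += len;             /* parent mirrors child state */
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
        if (net_xmit_drop_count(ret))
                qdisc_qstats_drop(sch);
        return ret;
}
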
D | sch_fifo.c
     18  static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in bfifo_enqueue() argument
     21  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))  in bfifo_enqueue()
     22  return qdisc_enqueue_tail(skb, sch);  in bfifo_enqueue()
     24  return qdisc_drop(skb, sch, to_free);  in bfifo_enqueue()
     27  static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in pfifo_enqueue() argument
     30  if (likely(sch->q.qlen < sch->limit))  in pfifo_enqueue()
     31  return qdisc_enqueue_tail(skb, sch);  in pfifo_enqueue()
     33  return qdisc_drop(skb, sch, to_free);  in pfifo_enqueue()
     36  static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in pfifo_tail_enqueue() argument
     41  if (likely(sch->q.qlen < sch->limit))  in pfifo_tail_enqueue()
    [all …]

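The fifo enqueue variants above differ only in how sch->limit is interpreted: bfifo_enqueue() treats it as a byte budget and checks the current backlog plus the incoming packet's length, while pfifo_enqueue() treats it as a packet count and checks sch->q.qlen. A side-by-side sketch of the two checks, using only the helpers visible in the listing (pfifo_tail_enqueue(), which drops from the head instead, is left out):

/* Byte-limited FIFO: sch->limit is a byte budget for the backlog. */
static int bfifo_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
                return qdisc_enqueue_tail(skb, sch);    /* fits: append to the queue */
        return qdisc_drop(skb, sch, to_free);           /* over budget: drop */
}

/* Packet-limited FIFO: sch->limit is a packet count. */
static int pfifo_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        if (likely(sch->q.qlen < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
        return qdisc_drop(skb, sch, to_free);
}
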
D | sch_red.c
     40  struct Qdisc *sch;  member
     57  static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in red_enqueue() argument
     60  struct red_sched_data *q = qdisc_priv(sch);  in red_enqueue()
     76  qdisc_qstats_overlimit(sch);  in red_enqueue()
     86  qdisc_qstats_overlimit(sch);  in red_enqueue()
     99  qdisc_qstats_backlog_inc(sch, skb);  in red_enqueue()
    100  sch->q.qlen++;  in red_enqueue()
    103  qdisc_qstats_drop(sch);  in red_enqueue()
    108  qdisc_drop(skb, sch, to_free);  in red_enqueue()
    112  static struct sk_buff *red_dequeue(struct Qdisc *sch)  in red_dequeue() argument
    [all …]

D | sch_ingress.c
     23  static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)  in ingress_leaf() argument
     28  static unsigned long ingress_find(struct Qdisc *sch, u32 classid)  in ingress_find() argument
     33  static unsigned long ingress_bind_filter(struct Qdisc *sch,  in ingress_bind_filter() argument
     36  return ingress_find(sch, classid);  in ingress_bind_filter()
     39  static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)  in ingress_unbind_filter() argument
     43  static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)  in ingress_walk() argument
     47  static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,  in ingress_tcf_block() argument
     50  struct ingress_sched_data *q = qdisc_priv(sch);  in ingress_tcf_block()
     62  static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)  in ingress_ingress_block_set() argument
     64  struct ingress_sched_data *q = qdisc_priv(sch);  in ingress_ingress_block_set()
    [all …]

D | sch_codel.c
     71  struct Qdisc *sch = ctx;  in dequeue_func() local
     72  struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);  in dequeue_func()
     75  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
     83  struct Qdisc *sch = ctx;  in drop_func() local
     86  qdisc_qstats_drop(sch);  in drop_func()
     89  static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)  in codel_qdisc_dequeue() argument
     91  struct codel_sched_data *q = qdisc_priv(sch);  in codel_qdisc_dequeue()
     94  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,  in codel_qdisc_dequeue()
    101  if (q->stats.drop_count && sch->q.qlen) {  in codel_qdisc_dequeue()
    102  qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);  in codel_qdisc_dequeue()
    [all …]

D | sch_multiq.c
     30  multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)  in multiq_classify() argument
     32  struct multiq_sched_data *q = qdisc_priv(sch);  in multiq_classify()
     60  multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in multiq_enqueue() argument
     66  qdisc = multiq_classify(skb, sch, &ret);  in multiq_enqueue()
     71  qdisc_qstats_drop(sch);  in multiq_enqueue()
     79  sch->q.qlen++;  in multiq_enqueue()
     83  qdisc_qstats_drop(sch);  in multiq_enqueue()
     87  static struct sk_buff *multiq_dequeue(struct Qdisc *sch)  in multiq_dequeue() argument
     89  struct multiq_sched_data *q = qdisc_priv(sch);  in multiq_dequeue()
    104  netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {  in multiq_dequeue()
    [all …]

D | sch_drr.c
     39  static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)  in drr_find_class() argument
     41  struct drr_sched *q = qdisc_priv(sch);  in drr_find_class()
     54  static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,  in drr_change_class() argument
     58  struct drr_sched *q = qdisc_priv(sch);  in drr_change_class()
     82  quantum = psched_mtu(qdisc_dev(sch));  in drr_change_class()
     89  qdisc_root_sleeping_running(sch),  in drr_change_class()
     97  sch_tree_lock(sch);  in drr_change_class()
    100  sch_tree_unlock(sch);  in drr_change_class()
    111  cl->qdisc = qdisc_create_dflt(sch->dev_queue,  in drr_change_class()
    122  qdisc_root_sleeping_running(sch),  in drr_change_class()
    [all …]

D | sch_atm.c
     73  static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)  in lookup_flow() argument
     75  struct atm_qdisc_data *p = qdisc_priv(sch);  in lookup_flow()
     85  static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,  in atm_tc_graft() argument
     89  struct atm_qdisc_data *p = qdisc_priv(sch);  in atm_tc_graft()
     93  sch, p, flow, new, old);  in atm_tc_graft()
    105  static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)  in atm_tc_leaf() argument
    109  pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);  in atm_tc_leaf()
    113  static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)  in atm_tc_find() argument
    115  struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);  in atm_tc_find()
    118  pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);  in atm_tc_find()
    [all …]

D | sch_dsmark.c
     64  static int dsmark_graft(struct Qdisc *sch, unsigned long arg,  in dsmark_graft() argument
     68  struct dsmark_qdisc_data *p = qdisc_priv(sch);  in dsmark_graft()
     71  __func__, sch, p, new, old);  in dsmark_graft()
     74  new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,  in dsmark_graft()
     75  sch->handle, NULL);  in dsmark_graft()
     80  *old = qdisc_replace(sch, new, &p->q);  in dsmark_graft()
     84  static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)  in dsmark_leaf() argument
     86  struct dsmark_qdisc_data *p = qdisc_priv(sch);  in dsmark_leaf()
     90  static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)  in dsmark_find() argument
     95  static unsigned long dsmark_bind_filter(struct Qdisc *sch,  in dsmark_bind_filter() argument
    [all …]

D | sch_skbprio.c
     68  static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in skbprio_enqueue() argument
     72  struct skbprio_sched_data *q = qdisc_priv(sch);  in skbprio_enqueue()
     82  if (sch->q.qlen < sch->limit) {  in skbprio_enqueue()
     84  qdisc_qstats_backlog_inc(sch, skb);  in skbprio_enqueue()
     94  sch->q.qlen++;  in skbprio_enqueue()
    103  return qdisc_drop(skb, sch, to_free);  in skbprio_enqueue()
    107  qdisc_qstats_backlog_inc(sch, skb);  in skbprio_enqueue()
    114  qdisc_qstats_backlog_dec(sch, to_drop);  in skbprio_enqueue()
    115  qdisc_drop(to_drop, sch, to_free);  in skbprio_enqueue()
    125  BUG_ON(sch->q.qlen != 1);  in skbprio_enqueue()
    [all …]

D | sch_etf.c
     75  static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)  in is_packet_valid() argument
     77  struct etf_sched_data *q = qdisc_priv(sch);  in is_packet_valid()
    108  static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)  in etf_peek_timesortedlist() argument
    110  struct etf_sched_data *q = qdisc_priv(sch);  in etf_peek_timesortedlist()
    120  static void reset_watchdog(struct Qdisc *sch)  in reset_watchdog() argument
    122  struct etf_sched_data *q = qdisc_priv(sch);  in reset_watchdog()
    123  struct sk_buff *skb = etf_peek_timesortedlist(sch);  in reset_watchdog()
    161  static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,  in etf_enqueue_timesortedlist() argument
    164  struct etf_sched_data *q = qdisc_priv(sch);  in etf_enqueue_timesortedlist()
    169  if (!is_packet_valid(sch, nskb)) {  in etf_enqueue_timesortedlist()
    [all …]

D | sch_fq_codel.c
     77  static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,  in fq_codel_classify() argument
     80  struct fq_codel_sched_data *q = qdisc_priv(sch);  in fq_codel_classify()
     85  if (TC_H_MAJ(skb->priority) == sch->handle &&  in fq_codel_classify()
    138  static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,  in fq_codel_drop() argument
    141  struct fq_codel_sched_data *q = qdisc_priv(sch);  in fq_codel_drop()
    179  sch->qstats.drops += i;  in fq_codel_drop()
    180  sch->qstats.backlog -= len;  in fq_codel_drop()
    181  sch->q.qlen -= i;  in fq_codel_drop()
    185  static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in fq_codel_enqueue() argument
    188  struct fq_codel_sched_data *q = qdisc_priv(sch);  in fq_codel_enqueue()
    [all …]

D | sch_cbs.c
     80  int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
     82  struct sk_buff *(*dequeue)(struct Qdisc *sch);
     87  static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in cbs_child_enqueue() argument
     98  sch->qstats.backlog += len;  in cbs_child_enqueue()
     99  sch->q.qlen++;  in cbs_child_enqueue()
    104  static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,  in cbs_enqueue_offload() argument
    107  struct cbs_sched_data *q = qdisc_priv(sch);  in cbs_enqueue_offload()
    110  return cbs_child_enqueue(skb, sch, qdisc, to_free);  in cbs_enqueue_offload()
    113  static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,  in cbs_enqueue_soft() argument
    116  struct cbs_sched_data *q = qdisc_priv(sch);  in cbs_enqueue_soft()
    [all …]

D | sch_pie.c
     68  struct Qdisc *sch;  member
     92  static bool drop_early(struct Qdisc *sch, u32 packet_size)  in drop_early() argument
     94  struct pie_sched_data *q = qdisc_priv(sch);  in drop_early()
     97  u32 mtu = psched_mtu(qdisc_dev(sch));  in drop_early()
    113  if (sch->qstats.backlog < 2 * mtu)  in drop_early()
    151  static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in pie_qdisc_enqueue() argument
    154  struct pie_sched_data *q = qdisc_priv(sch);  in pie_qdisc_enqueue()
    157  if (unlikely(qdisc_qlen(sch) >= sch->limit)) {  in pie_qdisc_enqueue()
    162  if (!drop_early(sch, skb->len)) {  in pie_qdisc_enqueue()
    176  if (qdisc_qlen(sch) > q->stats.maxq)  in pie_qdisc_enqueue()
    [all …]

D | sch_tbf.c
    143  static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,  in tbf_segment() argument
    146  struct tbf_sched_data *q = qdisc_priv(sch);  in tbf_segment()
    155  return qdisc_drop(skb, sch, to_free);  in tbf_segment()
    166  qdisc_qstats_drop(sch);  in tbf_segment()
    172  sch->q.qlen += nb;  in tbf_segment()
    174  qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);  in tbf_segment()
    179  static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,  in tbf_enqueue() argument
    182  struct tbf_sched_data *q = qdisc_priv(sch);  in tbf_enqueue()
    189  return tbf_segment(skb, sch, to_free);  in tbf_enqueue()
    190  return qdisc_drop(skb, sch, to_free);  in tbf_enqueue()
    [all …]

D | sch_mqprio.c
     30  static void mqprio_destroy(struct Qdisc *sch)  in mqprio_destroy() argument
     32  struct net_device *dev = qdisc_dev(sch);  in mqprio_destroy()
     33  struct mqprio_sched *priv = qdisc_priv(sch);  in mqprio_destroy()
    133  static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,  in mqprio_init() argument
    136  struct net_device *dev = qdisc_dev(sch);  in mqprio_init()
    137  struct mqprio_sched *priv = qdisc_priv(sch);  in mqprio_init()
    150  if (sch->parent != TC_H_ROOT)  in mqprio_init()
    230  TC_H_MAKE(TC_H_MAJ(sch->handle),  in mqprio_init()
    285  sch->flags |= TCQ_F_MQROOT;  in mqprio_init()
    289  static void mqprio_attach(struct Qdisc *sch)  in mqprio_attach() argument
    [all …]

/Linux-v5.4/include/net/ |
D | sch_generic.h
     59  struct Qdisc *sch,
     61  struct sk_buff * (*dequeue)(struct Qdisc *sch);
    214  struct tcf_block * (*tcf_block)(struct Qdisc *sch,
    243  struct Qdisc *sch,
    248  int (*init)(struct Qdisc *sch, struct nlattr *arg,
    252  int (*change)(struct Qdisc *sch,
    255  void (*attach)(struct Qdisc *sch);
    261  void (*ingress_block_set)(struct Qdisc *sch,
    263  void (*egress_block_set)(struct Qdisc *sch,
    265  u32 (*ingress_block_get)(struct Qdisc *sch);
    [all …]

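struct Qdisc_ops, declared in this header, is the hook table every queueing discipline in net/sched above fills in: enqueue/dequeue plus optional init, change, attach and block accessors. As a rough sketch of how a discipline wires itself up, here is a minimal, hypothetical 'example' qdisc module; the field subset and the register_qdisc()/unregister_qdisc() calls follow the pattern of the sch_*.c files listed above, while the callbacks are trivial pass-throughs to the qdisc's built-in skb queue:

#include <linux/module.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/* Trivial packet-limited enqueue, reusing the helpers seen in sch_fifo.c. */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
{
        if (likely(sch->q.qlen < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
        return qdisc_drop(skb, sch, to_free);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
        return qdisc_dequeue_head(sch);
}

static int example_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        /* default the packet limit from the device's tx_queue_len */
        sch->limit = qdisc_dev(sch)->tx_queue_len ? : 1;
        return 0;
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
        .id        = "example",
        .priv_size = 0,
        .enqueue   = example_enqueue,
        .dequeue   = example_dequeue,
        .peek      = qdisc_peek_head,
        .init      = example_init,
        .owner     = THIS_MODULE,
};

static int __init example_module_init(void)
{
        return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
        unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");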