Lines Matching refs:ulpq
32 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
36 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
41 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, in sctp_ulpq_init() argument
44 memset(ulpq, 0, sizeof(struct sctp_ulpq)); in sctp_ulpq_init()
46 ulpq->asoc = asoc; in sctp_ulpq_init()
47 skb_queue_head_init(&ulpq->reasm); in sctp_ulpq_init()
48 skb_queue_head_init(&ulpq->reasm_uo); in sctp_ulpq_init()
49 skb_queue_head_init(&ulpq->lobby); in sctp_ulpq_init()
50 ulpq->pd_mode = 0; in sctp_ulpq_init()
52 return ulpq; in sctp_ulpq_init()
57 void sctp_ulpq_flush(struct sctp_ulpq *ulpq) in sctp_ulpq_flush() argument
62 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) { in sctp_ulpq_flush()
67 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) { in sctp_ulpq_flush()
72 while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) { in sctp_ulpq_flush()
79 void sctp_ulpq_free(struct sctp_ulpq *ulpq) in sctp_ulpq_free() argument
81 sctp_ulpq_flush(ulpq); in sctp_ulpq_free()
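The init/flush/free entries above suggest the ulpq's shape: one reassembly queue for ordered fragments (reasm), one for unordered fragments (reasm_uo), a lobby for events waiting on stream order, and a partial-delivery flag. Below is a minimal userspace sketch of that lifecycle under those assumptions; the toy_* names and singly linked lists are illustrative, not the kernel's sk_buff queues.

	#include <stdlib.h>
	#include <string.h>

	struct toy_event { struct toy_event *next; };
	struct toy_list  { struct toy_event *head; };

	struct toy_ulpq {
		struct toy_list reasm;     /* fragments of ordered messages   */
		struct toy_list reasm_uo;  /* fragments of unordered messages */
		struct toy_list lobby;     /* events waiting for earlier SSNs */
		int pd_mode;               /* partial-delivery in progress?   */
	};

	static void toy_ulpq_init(struct toy_ulpq *q)
	{
		memset(q, 0, sizeof(*q));           /* zero the queues, clear pd_mode */
	}

	static void toy_list_flush(struct toy_list *l)
	{
		while (l->head) {                   /* dequeue and free until empty */
			struct toy_event *ev = l->head;

			l->head = ev->next;
			free(ev);
		}
	}

	static void toy_ulpq_flush(struct toy_ulpq *q)
	{
		toy_list_flush(&q->lobby);
		toy_list_flush(&q->reasm);
		toy_list_flush(&q->reasm_uo);
	}

	static void toy_ulpq_free(struct toy_ulpq *q)
	{
		toy_ulpq_flush(q);                  /* free is just flush */
	}

	int main(void)
	{
		struct toy_ulpq q;

		toy_ulpq_init(&q);
		q.lobby.head = calloc(1, sizeof(struct toy_event));
		toy_ulpq_free(&q);
		return 0;
	}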
85 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_tail_data() argument
101 event = sctp_ulpq_reasm(ulpq, event); in sctp_ulpq_tail_data()
110 event = sctp_ulpq_order(ulpq, event); in sctp_ulpq_tail_data()
118 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_tail_data()
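Taken together, the sctp_ulpq_tail_data() lines above show the receive-side pipeline: a new event goes through reassembly, then (for ordered data) through stream ordering, and whatever survives is queued toward the socket. A hedged sketch of that control flow, with toy_* stand-ins for the kernel helpers:

	#include <stdio.h>

	/* Opaque stand-in for a ULP event; the real code passes struct sctp_ulpevent. */
	struct toy_event { int tsn; };

	/* Trivial stubs so the sketch compiles; the real helpers may buffer the
	 * event and return NULL when nothing is deliverable yet. */
	static struct toy_event *toy_reasm(struct toy_event *ev) { return ev; }
	static struct toy_event *toy_order(struct toy_event *ev) { return ev; }
	static void toy_deliver(struct toy_event *ev) { printf("deliver tsn %d\n", ev->tsn); }

	/* Mirrors the shape of sctp_ulpq_tail_data(): reassemble, then order,
	 * then queue toward the socket. */
	static int toy_tail_data(struct toy_event *ev)
	{
		ev = toy_reasm(ev);      /* step 1: complete fragmented messages */
		if (!ev)
			return 0;        /* fragment buffered, nothing ready yet */

		ev = toy_order(ev);      /* step 2: enforce per-stream SSN order */
		if (!ev)
			return 0;        /* parked in the lobby for now          */

		toy_deliver(ev);         /* step 3: hand the event to the ULP    */
		return 1;
	}

	int main(void)
	{
		struct toy_event ev = { .tsn = 42 };

		return toy_tail_data(&ev) ? 0 : 1;
	}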
166 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq) in sctp_ulpq_set_pd() argument
168 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd()
171 ulpq->pd_mode = 1; in sctp_ulpq_set_pd()
175 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) in sctp_ulpq_clear_pd() argument
177 ulpq->pd_mode = 0; in sctp_ulpq_clear_pd()
178 sctp_ulpq_reasm_drain(ulpq); in sctp_ulpq_clear_pd()
179 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); in sctp_ulpq_clear_pd()
182 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list) in sctp_ulpq_tail_event() argument
184 struct sock *sk = ulpq->asoc->base.sk; in sctp_ulpq_tail_event()
207 if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe)) in sctp_ulpq_tail_event()
218 if (ulpq->pd_mode) { in sctp_ulpq_tail_event()
252 sctp_ulpq_clear_pd(ulpq); in sctp_ulpq_tail_event()
273 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, in sctp_ulpq_store_reasm() argument
283 pos = skb_peek_tail(&ulpq->reasm); in sctp_ulpq_store_reasm()
285 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
293 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
298 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_store_reasm()
307 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
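sctp_ulpq_store_reasm() keeps the reasm queue sorted by TSN: append when the new fragment follows the tail, otherwise insert before the first queued fragment with a larger TSN, using wrap-aware serial-number comparison. A self-contained sketch of that insertion (the array-backed queue and names are illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_FRAGS 16

	/* True if TSN a is strictly earlier than b, modulo 2^32. */
	static int tsn_lt(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;
	}

	struct toy_queue {
		uint32_t tsn[MAX_FRAGS];
		int len;
	};

	static void store_reasm(struct toy_queue *q, uint32_t tsn)
	{
		int i;

		/* Fast path: queue empty or the new fragment follows the tail. */
		if (q->len == 0 || tsn_lt(q->tsn[q->len - 1], tsn)) {
			q->tsn[q->len++] = tsn;
			return;
		}

		/* Otherwise insert just before the first larger queued TSN. */
		for (i = 0; i < q->len; i++)
			if (tsn_lt(tsn, q->tsn[i]))
				break;

		memmove(&q->tsn[i + 1], &q->tsn[i], (q->len - i) * sizeof(uint32_t));
		q->tsn[i] = tsn;
		q->len++;
	}

	int main(void)
	{
		struct toy_queue q = { .len = 0 };
		uint32_t in[] = { 0xfffffffeu, 2, 0xffffffffu, 0, 1 };
		int i;

		for (i = 0; i < 5; i++)
			store_reasm(&q, in[i]);

		for (i = 0; i < q.len; i++)
			printf("%u\n", (unsigned)q.tsn[i]);  /* wrap-aware TSN order */
		return 0;
	}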
399 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_reassembled() argument
431 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_reassembled()
441 if (skb_queue_is_first(&ulpq->reasm, pos)) { in sctp_ulpq_retrieve_reassembled()
475 asoc = ulpq->asoc; in sctp_ulpq_retrieve_reassembled()
490 &ulpq->reasm, in sctp_ulpq_retrieve_reassembled()
493 sctp_ulpq_set_pd(ulpq); in sctp_ulpq_retrieve_reassembled()
499 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, in sctp_ulpq_retrieve_reassembled()
500 &ulpq->reasm, first_frag, pos); in sctp_ulpq_retrieve_reassembled()
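sctp_ulpq_retrieve_reassembled() walks the TSN-ordered queue looking for a first fragment followed by consecutive middle fragments and a terminating last fragment before it builds the reassembled event. A simplified model of that scan, assuming the three fragment kinds shown; partial delivery and the not-fragmented case are left out:

	#include <stdint.h>
	#include <stdio.h>

	enum frag_kind { FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST };

	struct frag { uint32_t tsn; enum frag_kind kind; };

	/* Returns the index of the FIRST fragment of a complete message, or -1. */
	static int find_complete(const struct frag *q, int len)
	{
		int first = -1;
		uint32_t next_tsn = 0;

		for (int i = 0; i < len; i++) {
			switch (q[i].kind) {
			case FRAG_FIRST:
				first = i;                  /* (re)start a candidate run */
				next_tsn = q[i].tsn + 1;
				break;
			case FRAG_MIDDLE:
				if (first < 0 || q[i].tsn != next_tsn)
					first = -1;         /* gap or stray middle: reset */
				else
					next_tsn++;
				break;
			case FRAG_LAST:
				if (first >= 0 && q[i].tsn == next_tsn)
					return first;       /* complete run found */
				first = -1;
				break;
			}
		}
		return -1;
	}

	int main(void)
	{
		struct frag q[] = {
			{ 10, FRAG_FIRST }, { 11, FRAG_MIDDLE }, { 13, FRAG_LAST },  /* gap at 12 */
			{ 20, FRAG_FIRST }, { 21, FRAG_MIDDLE }, { 22, FRAG_LAST },  /* complete  */
		};

		printf("complete message starts at index %d\n", find_complete(q, 6));
		return 0;
	}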
507 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_partial() argument
520 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_partial()
528 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_partial()
565 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, in sctp_ulpq_retrieve_partial()
577 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, in sctp_ulpq_reasm() argument
588 sctp_ulpq_store_reasm(ulpq, event); in sctp_ulpq_reasm()
589 if (!ulpq->pd_mode) in sctp_ulpq_reasm()
590 retval = sctp_ulpq_retrieve_reassembled(ulpq); in sctp_ulpq_reasm()
598 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map); in sctp_ulpq_reasm()
600 retval = sctp_ulpq_retrieve_partial(ulpq); in sctp_ulpq_reasm()
607 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_first() argument
619 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_first()
626 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_first()
666 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, in sctp_ulpq_retrieve_first()
685 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn) in sctp_ulpq_reasm_flushtsn() argument
691 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_flushtsn()
694 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { in sctp_ulpq_reasm_flushtsn()
704 __skb_unlink(pos, &ulpq->reasm); in sctp_ulpq_reasm_flushtsn()
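sctp_ulpq_reasm_flushtsn() reacts to a FORWARD-TSN by unlinking and freeing every queued fragment whose TSN is at or before fwd_tsn. A small illustration of the same comparison, again with an array standing in for the sk_buff queue:

	#include <stdint.h>
	#include <stdio.h>

	/* Serial-number "a <= b", wrap-aware. */
	static int tsn_lte(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) <= 0;
	}

	/* Compacts the queue in place, keeping only TSNs after fwd_tsn. */
	static int flush_tsn(uint32_t *q, int len, uint32_t fwd_tsn)
	{
		int kept = 0;

		for (int i = 0; i < len; i++)
			if (!tsn_lte(q[i], fwd_tsn))
				q[kept++] = q[i];   /* survives the flush */
		return kept;
	}

	int main(void)
	{
		uint32_t q[] = { 5, 6, 9, 10, 11 };
		int len = flush_tsn(q, 5, 9);       /* peer reported fwd_tsn = 9 */

		for (int i = 0; i < len; i++)
			printf("%u\n", (unsigned)q[i]);  /* prints 10 and 11 */
		return 0;
	}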
716 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq) in sctp_ulpq_reasm_drain() argument
720 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_drain()
723 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) { in sctp_ulpq_reasm_drain()
731 event = sctp_ulpq_order(ulpq, event); in sctp_ulpq_reasm_drain()
737 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_reasm_drain()
745 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, in sctp_ulpq_retrieve_ordered() argument
755 stream = &ulpq->asoc->stream; in sctp_ulpq_retrieve_ordered()
760 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { in sctp_ulpq_retrieve_ordered()
779 __skb_unlink(pos, &ulpq->lobby); in sctp_ulpq_retrieve_ordered()
787 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, in sctp_ulpq_store_ordered() argument
795 pos = skb_peek_tail(&ulpq->lobby); in sctp_ulpq_store_ordered()
797 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
808 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
813 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
820 skb_queue_walk(&ulpq->lobby, pos) { in sctp_ulpq_store_ordered()
833 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
836 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, in sctp_ulpq_order() argument
849 stream = &ulpq->asoc->stream; in sctp_ulpq_order()
856 sctp_ulpq_store_ordered(ulpq, event); in sctp_ulpq_order()
866 sctp_ulpq_retrieve_ordered(ulpq, event); in sctp_ulpq_order()
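sctp_ulpq_order() delivers an ordered event only when its SSN is the one the stream expects; otherwise the event is parked in the lobby, and each delivery re-checks the lobby via sctp_ulpq_retrieve_ordered() for events it has unblocked. A compact single-stream model of that behaviour (the real lobby is kept SSN-sorted; this sketch is not):

	#include <stdint.h>
	#include <stdio.h>

	#define LOBBY_MAX 8

	static uint16_t next_ssn;                 /* next SSN expected on the stream */
	static uint16_t lobby[LOBBY_MAX];
	static int lobby_len;

	static void deliver(uint16_t ssn)
	{
		printf("deliver ssn %u\n", (unsigned)ssn);
		next_ssn++;                       /* sctp_ssn_next() equivalent */
	}

	/* After a delivery, keep releasing lobby entries that are now in order. */
	static void retrieve_ordered(void)
	{
		int progress = 1;

		while (progress) {
			progress = 0;
			for (int i = 0; i < lobby_len; i++) {
				if (lobby[i] != next_ssn)
					continue;
				deliver(lobby[i]);
				lobby[i] = lobby[--lobby_len];   /* unlink from lobby */
				progress = 1;
				break;
			}
		}
	}

	static void order(uint16_t ssn)
	{
		if (ssn != next_ssn) {            /* too early: park it in the lobby */
			lobby[lobby_len++] = ssn;
			return;
		}
		deliver(ssn);
		retrieve_ordered();
	}

	int main(void)
	{
		uint16_t in[] = { 1, 2, 0, 4, 3 };  /* SSNs arriving out of order */

		for (int i = 0; i < 5; i++)
			order(in[i]);             /* delivers 0,1,2,3,4 in order */
		return 0;
	}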
874 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) in sctp_ulpq_reap_ordered() argument
881 struct sk_buff_head *lobby = &ulpq->lobby; in sctp_ulpq_reap_ordered()
884 stream = &ulpq->asoc->stream; in sctp_ulpq_reap_ordered()
936 sctp_ulpq_retrieve_ordered(ulpq, event); in sctp_ulpq_reap_ordered()
937 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_reap_ordered()
944 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) in sctp_ulpq_skip() argument
949 stream = &ulpq->asoc->stream; in sctp_ulpq_skip()
961 sctp_ulpq_reap_ordered(ulpq, sid); in sctp_ulpq_skip()
964 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list, in sctp_ulpq_renege_list() argument
973 tsnmap = &ulpq->asoc->peer.tsn_map; in sctp_ulpq_renege_list()
1013 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) in sctp_ulpq_renege_order() argument
1015 return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); in sctp_ulpq_renege_order()
1019 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) in sctp_ulpq_renege_frags() argument
1021 return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); in sctp_ulpq_renege_frags()
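The renege helpers reclaim receive buffer space under memory pressure: sctp_ulpq_renege_order() and sctp_ulpq_renege_frags() hand the lobby or reasm queue to sctp_ulpq_renege_list(), which frees queued events starting from the newest until enough bytes are recovered. A rough sketch under those assumptions (the byte accounting and array queue are illustrative; the kernel also refuses to renege at or below the cumulative TSN ack point):

	#include <stdint.h>
	#include <stdio.h>

	struct queued { uint32_t tsn; uint16_t bytes; };

	/* Frees from the tail of the queue; returns the number of bytes reclaimed
	 * and shrinks *len accordingly. */
	static unsigned int renege_list(struct queued *q, int *len, unsigned int needed)
	{
		unsigned int freed = 0;

		while (*len > 0 && freed < needed) {
			struct queued *victim = &q[*len - 1];   /* newest event first */

			freed += victim->bytes;
			printf("renege tsn %u (%u bytes)\n",
			       (unsigned)victim->tsn, (unsigned)victim->bytes);
			(*len)--;
		}
		return freed;
	}

	int main(void)
	{
		struct queued lobby[] = { { 100, 200 }, { 101, 300 }, { 103, 500 } };
		int len = 3;
		unsigned int freed = renege_list(lobby, &len, 600);

		printf("freed %u bytes, %d events left\n", freed, len);
		return 0;
	}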
1025 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, in sctp_ulpq_partial_delivery() argument
1034 asoc = ulpq->asoc; in sctp_ulpq_partial_delivery()
1040 if (ulpq->pd_mode) in sctp_ulpq_partial_delivery()
1046 skb = skb_peek(&asoc->ulpq.reasm); in sctp_ulpq_partial_delivery()
1060 event = sctp_ulpq_retrieve_first(ulpq); in sctp_ulpq_partial_delivery()
1067 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_partial_delivery()
1068 sctp_ulpq_set_pd(ulpq); in sctp_ulpq_partial_delivery()
1075 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_renege() argument
1078 struct sctp_association *asoc = ulpq->asoc; in sctp_ulpq_renege()
1086 freed = sctp_ulpq_renege_order(ulpq, needed); in sctp_ulpq_renege()
1088 freed += sctp_ulpq_renege_frags(ulpq, needed - freed); in sctp_ulpq_renege()
1093 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); in sctp_ulpq_renege()
1099 sctp_ulpq_partial_delivery(ulpq, gfp); in sctp_ulpq_renege()
1101 sctp_ulpq_reasm_drain(ulpq); in sctp_ulpq_renege()
1108 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) in sctp_ulpq_abort_pd() argument
1114 if (!ulpq->pd_mode) in sctp_ulpq_abort_pd()
1117 sk = ulpq->asoc->base.sk; in sctp_ulpq_abort_pd()
1119 if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe, in sctp_ulpq_abort_pd()
1121 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, in sctp_ulpq_abort_pd()
1128 if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) { in sctp_ulpq_abort_pd()