Lines matching refs:ulpq (net/sctp/ulpqueue.c)

47 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
51 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
56 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, in sctp_ulpq_init() argument
59 memset(ulpq, 0, sizeof(struct sctp_ulpq)); in sctp_ulpq_init()
61 ulpq->asoc = asoc; in sctp_ulpq_init()
62 skb_queue_head_init(&ulpq->reasm); in sctp_ulpq_init()
63 skb_queue_head_init(&ulpq->reasm_uo); in sctp_ulpq_init()
64 skb_queue_head_init(&ulpq->lobby); in sctp_ulpq_init()
65 ulpq->pd_mode = 0; in sctp_ulpq_init()
67 return ulpq; in sctp_ulpq_init()
72 void sctp_ulpq_flush(struct sctp_ulpq *ulpq) in sctp_ulpq_flush() argument
77 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) { in sctp_ulpq_flush()
82 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) { in sctp_ulpq_flush()
87 while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) { in sctp_ulpq_flush()
94 void sctp_ulpq_free(struct sctp_ulpq *ulpq) in sctp_ulpq_free() argument
96 sctp_ulpq_flush(ulpq); in sctp_ulpq_free()
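
The init/flush/free group above sets up the three event queues (reasm, reasm_uo, lobby) and tears them down by dequeuing and freeing every pending event; sctp_ulpq_free() is just a flush. A minimal userspace sketch of that lifecycle, with made-up toy_* types standing in for struct sctp_ulpq and sk_buff_head:

```c
/* Illustrative model only: singly linked lists replace sk_buff_head,
 * and toy_event replaces struct sctp_ulpevent. */
#include <stdlib.h>
#include <string.h>

struct toy_event { struct toy_event *next; };

struct toy_ulpq {
	void *asoc;                 /* owning association */
	struct toy_event *reasm;    /* ordered-fragment reassembly queue */
	struct toy_event *reasm_uo; /* unordered-fragment reassembly queue */
	struct toy_event *lobby;    /* out-of-order (per-stream SSN) queue */
	int pd_mode;                /* partial-delivery flag */
};

static struct toy_ulpq *toy_ulpq_init(struct toy_ulpq *q, void *asoc)
{
	memset(q, 0, sizeof(*q));   /* mirrors the memset at line 59 */
	q->asoc = asoc;
	return q;
}

static void toy_drain(struct toy_event **head)
{
	struct toy_event *e;

	while ((e = *head) != NULL) {
		*head = e->next;
		free(e);            /* the kernel frees via sctp_ulpevent_free() */
	}
}

/* As in sctp_ulpq_flush(): empty all three queues. */
static void toy_ulpq_flush(struct toy_ulpq *q)
{
	toy_drain(&q->lobby);
	toy_drain(&q->reasm);
	toy_drain(&q->reasm_uo);
}
```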
100 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_tail_data() argument
116 event = sctp_ulpq_reasm(ulpq, event); in sctp_ulpq_tail_data()
124 event = sctp_ulpq_order(ulpq, event); in sctp_ulpq_tail_data()
132 sctp_ulpq_tail_event(ulpq, event); in sctp_ulpq_tail_data()
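
sctp_ulpq_tail_data() is the entry point for freshly received DATA: an event must first clear reassembly, then (unless the chunk was sent unordered) per-stream ordering, and only then reaches the socket via sctp_ulpq_tail_event(). A hedged sketch of that flow, with stubs standing in for the two stages:

```c
/* Illustrative only: toy_reasm()/toy_order() stand in for
 * sctp_ulpq_reasm()/sctp_ulpq_order() and simply pass the event through. */
struct toy_event { int unordered; };

/* stand-in: would return NULL while fragments of the message are missing */
static struct toy_event *toy_reasm(struct toy_event *e) { return e; }

/* stand-in: would return NULL while earlier SSNs on the stream are missing */
static struct toy_event *toy_order(struct toy_event *e) { return e; }

/* stand-in for sctp_ulpq_tail_event(): queue on the socket, wake the reader */
static void toy_deliver(struct toy_event *e) { (void)e; }

/* returns 1 if an event reached the socket, 0 if it is still parked */
static int toy_tail_data(struct toy_event *e)
{
	e = toy_reasm(e);		/* reassembly stage */
	if (!e)
		return 0;
	if (!e->unordered)		/* unordered DATA skips the lobby */
		e = toy_order(e);	/* per-stream ordering stage */
	if (!e)
		return 0;
	toy_deliver(e);
	return 1;
}
```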
180 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq) in sctp_ulpq_set_pd() argument
182 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd()
185 ulpq->pd_mode = 1; in sctp_ulpq_set_pd()
189 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) in sctp_ulpq_clear_pd() argument
191 ulpq->pd_mode = 0; in sctp_ulpq_clear_pd()
192 sctp_ulpq_reasm_drain(ulpq); in sctp_ulpq_clear_pd()
193 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); in sctp_ulpq_clear_pd()
199 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) in sctp_ulpq_tail_event() argument
201 struct sock *sk = ulpq->asoc->base.sk; in sctp_ulpq_tail_event()
233 if (ulpq->pd_mode) { in sctp_ulpq_tail_event()
273 sctp_ulpq_clear_pd(ulpq); in sctp_ulpq_tail_event()
294 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, in sctp_ulpq_store_reasm() argument
304 pos = skb_peek_tail(&ulpq->reasm); in sctp_ulpq_store_reasm()
306 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
314 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
319 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_store_reasm()
328 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
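
sctp_ulpq_store_reasm() keeps the reassembly queue sorted by TSN: append when the new fragment is not older than the tail (the common, in-order case), otherwise walk the queue and insert before the first entry with a larger TSN. A self-contained model of that insert, using serial-number comparison so 32-bit TSN wraparound is handled as the kernel's TSN_lt() does:

```c
/* Illustrative only: a plain linked list replaces the reasm sk_buff_head. */
#include <stdint.h>

struct frag {
	uint32_t tsn;
	struct frag *next;
};

/* true if a < b in RFC 1982 serial-number order (wraparound-safe) */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static void store_frag(struct frag **head, struct frag *nf)
{
	struct frag **pp = head;

	/* The kernel first peeks the tail and appends in the common case;
	 * here we just walk to the first entry with a larger TSN and
	 * insert before it, as __skb_queue_before() does at line 328. */
	while (*pp && !tsn_lt(nf->tsn, (*pp)->tsn))
		pp = &(*pp)->next;

	nf->next = *pp;
	*pp = nf;
}
```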
420 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_reassembled() argument
452 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_reassembled()
462 if (pos == ulpq->reasm.next) { in sctp_ulpq_retrieve_reassembled()
496 asoc = ulpq->asoc; in sctp_ulpq_retrieve_reassembled()
511 &ulpq->reasm, in sctp_ulpq_retrieve_reassembled()
515 sctp_ulpq_set_pd(ulpq); in sctp_ulpq_retrieve_reassembled()
521 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_reassembled()
522 &ulpq->reasm, first_frag, pos); in sctp_ulpq_retrieve_reassembled()
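
sctp_ulpq_retrieve_reassembled() walks that TSN-sorted queue looking for a FIRST fragment followed by consecutive MIDDLE fragments and ending in a LAST fragment; only such a contiguous run can be spliced into one event by sctp_make_reassembled_event(). A sketch of the scan, omitting the partial-delivery point handling; types and names are illustrative:

```c
#include <stddef.h>
#include <stdint.h>

enum frag_pos { FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST };

struct frag {
	uint32_t tsn;
	enum frag_pos pos;
	struct frag *next;
};

/* return the FIRST fragment of a complete run, or NULL if none yet */
static struct frag *find_complete(struct frag *head)
{
	struct frag *first = NULL;
	uint32_t next_tsn = 0;

	for (struct frag *f = head; f; f = f->next) {
		switch (f->pos) {
		case FRAG_FIRST:
			first = f;		/* start a candidate run */
			next_tsn = f->tsn + 1;
			break;
		case FRAG_MIDDLE:
			if (first && f->tsn == next_tsn)
				next_tsn++;	/* run still contiguous */
			else
				first = NULL;	/* gap: abandon this run */
			break;
		case FRAG_LAST:
			if (first && f->tsn == next_tsn)
				return first;	/* complete message found */
			first = NULL;
			break;
		}
	}
	return NULL;
}
```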
529 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_partial() argument
542 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_partial()
550 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_partial()
587 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_partial()
588 &ulpq->reasm, first_frag, last_frag); in sctp_ulpq_retrieve_partial()
599 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, in sctp_ulpq_reasm() argument
610 sctp_ulpq_store_reasm(ulpq, event); in sctp_ulpq_reasm()
611 if (!ulpq->pd_mode) in sctp_ulpq_reasm()
612 retval = sctp_ulpq_retrieve_reassembled(ulpq); in sctp_ulpq_reasm()
620 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map); in sctp_ulpq_reasm()
622 retval = sctp_ulpq_retrieve_partial(ulpq); in sctp_ulpq_reasm()
629 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_first() argument
641 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_first()
648 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_first()
688 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), in sctp_ulpq_retrieve_first()
689 &ulpq->reasm, first_frag, last_frag); in sctp_ulpq_retrieve_first()
707 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn) in sctp_ulpq_reasm_flushtsn() argument
713 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_flushtsn()
716 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { in sctp_ulpq_reasm_flushtsn()
726 __skb_unlink(pos, &ulpq->reasm); in sctp_ulpq_reasm_flushtsn()
738 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq) in sctp_ulpq_reasm_drain() argument
743 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_drain()
746 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) { in sctp_ulpq_reasm_drain()
752 event = sctp_ulpq_order(ulpq, event); in sctp_ulpq_reasm_drain()
759 sctp_ulpq_tail_event(ulpq, event); in sctp_ulpq_reasm_drain()
767 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, in sctp_ulpq_retrieve_ordered() argument
777 stream = &ulpq->asoc->stream; in sctp_ulpq_retrieve_ordered()
782 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { in sctp_ulpq_retrieve_ordered()
801 __skb_unlink(pos, &ulpq->lobby); in sctp_ulpq_retrieve_ordered()
809 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, in sctp_ulpq_store_ordered() argument
817 pos = skb_peek_tail(&ulpq->lobby); in sctp_ulpq_store_ordered()
819 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
830 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
835 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
842 skb_queue_walk(&ulpq->lobby, pos) { in sctp_ulpq_store_ordered()
855 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
858 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, in sctp_ulpq_order() argument
871 stream = &ulpq->asoc->stream; in sctp_ulpq_order()
878 sctp_ulpq_store_ordered(ulpq, event); in sctp_ulpq_order()
888 sctp_ulpq_retrieve_ordered(ulpq, event); in sctp_ulpq_order()
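
sctp_ulpq_order() delivers an event only when its SSN is exactly the stream's next expected SSN; anything else waits in the lobby, and each delivery bumps the counter so sctp_ulpq_retrieve_ordered() can release queued successors. A minimal model of that per-stream check (sid is assumed to be a valid, already verified stream index; names are invented):

```c
#include <stdint.h>

#define TOY_STREAMS 16

struct toy_stream_state {
	uint16_t next_ssn[TOY_STREAMS];	/* next SSN expected per inbound stream */
};

/* true if an event with this sid/ssn may be delivered now */
static int toy_in_order(struct toy_stream_state *st, uint16_t sid, uint16_t ssn)
{
	if (ssn != st->next_ssn[sid])
		return 0;		/* hold it in the lobby */
	st->next_ssn[sid]++;		/* consume the SSN, like sctp_ssn_next() */
	return 1;
}
```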
896 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) in sctp_ulpq_reap_ordered() argument
903 struct sk_buff_head *lobby = &ulpq->lobby; in sctp_ulpq_reap_ordered()
906 stream = &ulpq->asoc->stream; in sctp_ulpq_reap_ordered()
958 sctp_ulpq_retrieve_ordered(ulpq, event); in sctp_ulpq_reap_ordered()
959 sctp_ulpq_tail_event(ulpq, event); in sctp_ulpq_reap_ordered()
966 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) in sctp_ulpq_skip() argument
971 stream = &ulpq->asoc->stream; in sctp_ulpq_skip()
983 sctp_ulpq_reap_ordered(ulpq, sid); in sctp_ulpq_skip()
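
sctp_ulpq_skip() handles a FORWARD-TSN for one stream: if the skipped SSN is not older than what the stream still expects, advance the expected SSN past it, then reap the lobby for events that just became deliverable. A small sketch of the SSN bookkeeping, using 16-bit serial comparison in the style of the kernel's SSN_lt(); toy names only:

```c
#include <stdint.h>

/* a <= b in 16-bit serial-number order */
static int ssn_lte(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) <= 0;
}

/* next_ssn: the stream's next expected SSN, updated in place */
static void toy_skip(uint16_t *next_ssn, uint16_t skipped_ssn)
{
	if (ssn_lte(*next_ssn, skipped_ssn))
		*next_ssn = (uint16_t)(skipped_ssn + 1);
	/* ...then release now-in-order events, cf. sctp_ulpq_reap_ordered() */
}
```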
986 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list, in sctp_ulpq_renege_list() argument
995 tsnmap = &ulpq->asoc->peer.tsn_map; in sctp_ulpq_renege_list()
1035 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) in sctp_ulpq_renege_order() argument
1037 return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); in sctp_ulpq_renege_order()
1041 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) in sctp_ulpq_renege_frags() argument
1043 return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); in sctp_ulpq_renege_frags()
1047 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, in sctp_ulpq_partial_delivery() argument
1056 asoc = ulpq->asoc; in sctp_ulpq_partial_delivery()
1062 if (ulpq->pd_mode) in sctp_ulpq_partial_delivery()
1068 skb = skb_peek(&asoc->ulpq.reasm); in sctp_ulpq_partial_delivery()
1082 event = sctp_ulpq_retrieve_first(ulpq); in sctp_ulpq_partial_delivery()
1085 sctp_ulpq_tail_event(ulpq, event); in sctp_ulpq_partial_delivery()
1086 sctp_ulpq_set_pd(ulpq); in sctp_ulpq_partial_delivery()
1093 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_renege() argument
1096 struct sctp_association *asoc = ulpq->asoc; in sctp_ulpq_renege()
1104 freed = sctp_ulpq_renege_order(ulpq, needed); in sctp_ulpq_renege()
1106 freed += sctp_ulpq_renege_frags(ulpq, needed - freed); in sctp_ulpq_renege()
1110 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); in sctp_ulpq_renege()
1116 sctp_ulpq_partial_delivery(ulpq, gfp); in sctp_ulpq_renege()
1118 sctp_ulpq_reasm_drain(ulpq); in sctp_ulpq_renege()
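
Under receive-window pressure, sctp_ulpq_renege() frees space by dropping data the peer will have to retransmit anyway: the out-of-order lobby first, then the reassembly queue, before retrying the new chunk and, if still blocked, falling back to partial delivery. A toy model of the "lobby first, fragments second" reclaim order; the byte counters and names are invented, whereas the kernel frees real sk_buffs and updates the peer's tsn_map:

```c
#include <stddef.h>

static size_t lobby_bytes = 4096;	/* pretend backlog in the lobby queue */
static size_t reasm_bytes = 8192;	/* pretend backlog in the reasm queue */

static size_t reclaim(size_t *pool, size_t needed)
{
	size_t freed = needed < *pool ? needed : *pool;

	*pool -= freed;
	return freed;
}

/* returns how many bytes were actually freed toward 'needed' */
static size_t toy_renege(size_t needed)
{
	size_t freed = reclaim(&lobby_bytes, needed);		/* order queue first */

	if (freed < needed)
		freed += reclaim(&reasm_bytes, needed - freed);	/* then fragments */
	return freed;
}
```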
1129 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) in sctp_ulpq_abort_pd() argument
1135 if (!ulpq->pd_mode) in sctp_ulpq_abort_pd()
1138 sk = ulpq->asoc->base.sk; in sctp_ulpq_abort_pd()
1142 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, in sctp_ulpq_abort_pd()
1149 if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) { in sctp_ulpq_abort_pd()