Lines Matching refs:i2400m

185 struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws); in i2400m_report_hook_work() local
186 struct device *dev = i2400m_dev(i2400m); in i2400m_report_hook_work()
192 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_report_hook_work()
193 list_splice_init(&i2400m->rx_reports, &list); in i2400m_report_hook_work()
194 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_report_hook_work()
201 i2400m_report_hook(i2400m, args->l3l4_hdr, args->size); in i2400m_report_hook_work()
214 void i2400m_report_hook_flush(struct i2400m *i2400m) in i2400m_report_hook_flush() argument
216 struct device *dev = i2400m_dev(i2400m); in i2400m_report_hook_flush()
222 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_report_hook_flush()
223 list_splice_init(&i2400m->rx_reports, &list); in i2400m_report_hook_flush()
224 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_report_hook_flush()
243 void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx, in i2400m_report_hook_queue() argument
246 struct device *dev = i2400m_dev(i2400m); in i2400m_report_hook_queue()
255 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_report_hook_queue()
256 list_add_tail(&args->list_node, &i2400m->rx_reports); in i2400m_report_hook_queue()
257 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_report_hook_queue()
260 if (likely(i2400m->ready)) /* only send if up */ in i2400m_report_hook_queue()
261 queue_work(i2400m->work_queue, &i2400m->rx_report_ws); in i2400m_report_hook_queue()
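
The three report-hook groups above (i2400m_report_hook_work(), _flush() and _queue()) all revolve around one deferral pattern: the RX path queues report descriptors on i2400m->rx_reports under rx_lock and kicks rx_report_ws, and the worker drains the list by splicing it onto a private head before processing anything outside the lock. The following is a minimal sketch of that pattern, not the driver's code: struct rx_ctx and struct report_args are simplified stand-ins for struct i2400m and its queued-report node, the allocation context is an assumption, and the processing step is a placeholder for i2400m_report_hook().

/*
 * Sketch of the splice-under-lock deferral used by the report-hook path.
 * rx_ctx / report_args are simplified stand-ins, not the driver's types.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct report_args {                    /* one queued report (hypothetical layout) */
        struct list_head list_node;
        const void *l3l4_hdr;
        size_t size;
};

struct rx_ctx {                         /* stand-in for the relevant i2400m fields */
        spinlock_t rx_lock;
        struct list_head rx_reports;
        struct work_struct rx_report_ws;
        struct workqueue_struct *work_queue;
        bool ready;
};

/* Producer (RX path): queue a node under the lock, then kick the worker. */
static void report_queue(struct rx_ctx *ctx, const void *l3l4_hdr, size_t size)
{
        struct report_args *args;
        unsigned long flags;

        args = kzalloc(sizeof(*args), GFP_ATOMIC);      /* gfp flags assumed */
        if (!args)
                return;                 /* the real driver logs the drop */
        args->l3l4_hdr = l3l4_hdr;
        args->size = size;

        spin_lock_irqsave(&ctx->rx_lock, flags);
        list_add_tail(&args->list_node, &ctx->rx_reports);
        spin_unlock_irqrestore(&ctx->rx_lock, flags);

        if (likely(ctx->ready))         /* only kick the worker once the device is up */
                queue_work(ctx->work_queue, &ctx->rx_report_ws);
}

/* Consumer (work item): drain the shared list, process outside the lock. */
static void report_work(struct work_struct *ws)
{
        struct rx_ctx *ctx = container_of(ws, struct rx_ctx, rx_report_ws);
        struct report_args *args, *next;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&ctx->rx_lock, flags);
        list_splice_init(&ctx->rx_reports, &list);      /* take all, leave rx_reports empty */
        spin_unlock_irqrestore(&ctx->rx_lock, flags);

        list_for_each_entry_safe(args, next, &list, list_node) {
                /* the driver calls i2400m_report_hook(i2400m, args->l3l4_hdr, args->size) */
                list_del(&args->list_node);
                kfree(args);
        }
}
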
286 void i2400m_rx_ctl_ack(struct i2400m *i2400m, in i2400m_rx_ctl_ack() argument
289 struct device *dev = i2400m_dev(i2400m); in i2400m_rx_ctl_ack()
290 struct wimax_dev *wimax_dev = &i2400m->wimax_dev; in i2400m_rx_ctl_ack()
295 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_rx_ctl_ack()
296 if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) { in i2400m_rx_ctl_ack()
300 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx_ctl_ack()
305 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_rx_ctl_ack()
306 if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) { in i2400m_rx_ctl_ack()
312 i2400m->ack_skb = ack_skb; in i2400m_rx_ctl_ack()
313 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx_ctl_ack()
314 complete(&i2400m->msg_completion); in i2400m_rx_ctl_ack()
321 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx_ctl_ack()
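
i2400m_rx_ctl_ack() hands a control ack to whoever is blocked in the message-sending path: the waiter pre-loads i2400m->ack_skb with ERR_PTR(-EINPROGRESS), and the RX side, under rx_lock, swaps that token for the ack skb and fires msg_completion; an ack arriving with no waiter is dropped. Below is a reduced sketch of both sides of that handoff; ctl_ctx is a stand-in type, and the sender side (ctl_msg_wait_ack, with no timeout or error handling) is illustrative rather than the driver's i2400m_msg_to_dev().

/*
 * Sketch of the ack_skb / completion handoff around i2400m_rx_ctl_ack().
 * ctl_ctx is a stand-in; ERR_PTR(-EINPROGRESS) marks "a waiter is present".
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct ctl_ctx {
        spinlock_t rx_lock;
        struct sk_buff *ack_skb;        /* ERR_PTR(-EINPROGRESS) while waiting */
        struct completion msg_completion;
};

/* RX side: deliver the ack only if someone is actually waiting for it. */
static void ctl_ack_deliver(struct ctl_ctx *ctx, struct sk_buff *ack_skb)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->rx_lock, flags);
        if (ctx->ack_skb != ERR_PTR(-EINPROGRESS)) {
                spin_unlock_irqrestore(&ctx->rx_lock, flags);
                kfree_skb(ack_skb);             /* unsolicited ack, drop it */
                return;
        }
        ctx->ack_skb = ack_skb;                 /* hand the skb to the waiter */
        spin_unlock_irqrestore(&ctx->rx_lock, flags);
        complete(&ctx->msg_completion);
}

/* Sender side (illustrative): arm the token, send, wait for the ack. */
static struct sk_buff *ctl_msg_wait_ack(struct ctl_ctx *ctx)
{
        struct sk_buff *ack;
        unsigned long flags;

        spin_lock_irqsave(&ctx->rx_lock, flags);
        ctx->ack_skb = ERR_PTR(-EINPROGRESS);
        spin_unlock_irqrestore(&ctx->rx_lock, flags);

        /* ... transmit the command to the device here ... */

        wait_for_completion(&ctx->msg_completion);
        spin_lock_irqsave(&ctx->rx_lock, flags);
        ack = ctx->ack_skb;                     /* skb stored by ctl_ack_deliver() */
        ctx->ack_skb = NULL;
        spin_unlock_irqrestore(&ctx->rx_lock, flags);
        return ack;
}
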
358 void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx, in i2400m_rx_ctl() argument
362 struct device *dev = i2400m_dev(i2400m); in i2400m_rx_ctl()
366 result = i2400m_msg_size_check(i2400m, l3l4_hdr, size); in i2400m_rx_ctl()
400 i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size); in i2400m_rx_ctl()
401 if (unlikely(i2400m->trace_msg_from_user)) in i2400m_rx_ctl()
402 wimax_msg(&i2400m->wimax_dev, "echo", in i2400m_rx_ctl()
404 result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size, in i2400m_rx_ctl()
410 i2400m_rx_ctl_ack(i2400m, payload, size); in i2400m_rx_ctl()
434 void i2400m_rx_trace(struct i2400m *i2400m, in i2400m_rx_trace() argument
438 struct device *dev = i2400m_dev(i2400m); in i2400m_rx_trace()
439 struct wimax_dev *wimax_dev = &i2400m->wimax_dev; in i2400m_rx_trace()
443 result = i2400m_msg_size_check(i2400m, l3l4_hdr, size); in i2400m_rx_trace()
505 unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq) in __i2400m_roq_index() argument
507 return ((unsigned long) roq - (unsigned long) i2400m->rx_roq) in __i2400m_roq_index()
553 void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index, in i2400m_roq_log_entry_print() argument
557 struct device *dev = i2400m_dev(i2400m); in i2400m_roq_log_entry_print()
588 void i2400m_roq_log_add(struct i2400m *i2400m, in i2400m_roq_log_add() argument
595 int index = __i2400m_roq_index(i2400m, roq); in i2400m_roq_log_add()
611 i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e); in i2400m_roq_log_add()
617 void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq) in i2400m_roq_log_dump() argument
621 int index = __i2400m_roq_index(i2400m, roq); in i2400m_roq_log_dump()
627 i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e); in i2400m_roq_log_dump()
657 void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq, in __i2400m_roq_queue() argument
660 struct device *dev = i2400m_dev(i2400m); in __i2400m_roq_queue()
666 i2400m, roq, skb, sn, nsn); in __i2400m_roq_queue()
725 i2400m, roq, skb, sn, nsn); in __i2400m_roq_queue()
741 unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, in __i2400m_roq_update_ws() argument
744 struct device *dev = i2400m_dev(i2400m); in __i2400m_roq_update_ws()
766 i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs); in __i2400m_roq_update_ws()
786 void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq) in i2400m_roq_reset() argument
788 struct device *dev = i2400m_dev(i2400m); in i2400m_roq_reset()
792 d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq); in i2400m_roq_reset()
793 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET, in i2400m_roq_reset()
801 i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs); in i2400m_roq_reset()
804 d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq); in i2400m_roq_reset()
821 void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq, in i2400m_roq_queue() argument
824 struct device *dev = i2400m_dev(i2400m); in i2400m_roq_queue()
828 i2400m, roq, skb, lbn); in i2400m_roq_queue()
834 i2400m_roq_log_dump(i2400m, roq); in i2400m_roq_queue()
835 i2400m_reset(i2400m, I2400M_RT_WARM); in i2400m_roq_queue()
837 __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn); in i2400m_roq_queue()
838 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET, in i2400m_roq_queue()
842 i2400m, roq, skb, lbn); in i2400m_roq_queue()
855 void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, in i2400m_roq_update_ws() argument
858 struct device *dev = i2400m_dev(i2400m); in i2400m_roq_update_ws()
861 d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn); in i2400m_roq_update_ws()
864 nsn = __i2400m_roq_update_ws(i2400m, roq, sn); in i2400m_roq_update_ws()
865 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS, in i2400m_roq_update_ws()
867 d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn); in i2400m_roq_update_ws()
884 void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq, in i2400m_roq_queue_update_ws() argument
887 struct device *dev = i2400m_dev(i2400m); in i2400m_roq_queue_update_ws()
891 i2400m, roq, skb, sn); in i2400m_roq_queue_update_ws()
907 i2400m_net_erx(i2400m, skb, roq_data->cs); in i2400m_roq_queue_update_ws()
909 __i2400m_roq_queue(i2400m, roq, skb, sn, nsn); in i2400m_roq_queue_update_ws()
911 __i2400m_roq_update_ws(i2400m, roq, sn + 1); in i2400m_roq_queue_update_ws()
912 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS, in i2400m_roq_queue_update_ws()
916 i2400m, roq, skb, sn); in i2400m_roq_queue_update_ws()
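
The roq (reorder queue) helpers above normalize each packet's sequence number against the window start roq->ws before deciding whether to deliver it, hold it, or, as the log-dump and warm-reset calls in i2400m_roq_queue() show, treat it as a bug. Here is a standalone sketch of that arithmetic under stated assumptions: the 2048-entry sequence space and the 1024 validity bound are taken from memory of the driver's constants and should be checked against the source, and roq_classify() is an illustrative summary of decisions that the listed functions spread across several call sites.

/*
 * Sketch of the reorder-queue sequence normalization. SEQ_MOD and
 * SEQ_VALID_MAX are assumed constants; roq_ctx is a stand-in type.
 */
#include <linux/kernel.h>

#define SEQ_MOD         2048    /* assumed size of the sequence-number space */
#define SEQ_VALID_MAX   1024    /* assumed: nsn at or past this is a bug */

struct roq_ctx {
        unsigned int ws;        /* window start: next sequence number expected */
};

/* Distance of sn from the window start, wrapped into [0, SEQ_MOD). */
static unsigned int roq_nsn(const struct roq_ctx *roq, unsigned int sn)
{
        int r = ((int)sn - (int)roq->ws) % SEQ_MOD;

        if (r < 0)
                r += SEQ_MOD;
        return r;
}

/*
 * One possible classification mirroring the driver's checks: nsn == 0 is
 * the packet we are waiting for (deliver, advance ws), anything else inside
 * the valid half-window is held in the queue, and the rest is treated as a
 * protocol/SW bug (the driver dumps the roq log and issues a warm reset).
 * The real code also considers whether the queue is already non-empty.
 */
enum roq_action { ROQ_DELIVER, ROQ_HOLD, ROQ_BUG };

static enum roq_action roq_classify(const struct roq_ctx *roq, unsigned int sn)
{
        unsigned int nsn = roq_nsn(roq, sn);

        if (nsn == 0)
                return ROQ_DELIVER;
        if (nsn < SEQ_VALID_MAX)
                return ROQ_HOLD;
        return ROQ_BUG;
}
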
930 struct i2400m *i2400m in i2400m_rx_roq_destroy() local
931 = container_of(ref, struct i2400m, rx_roq_refcount); in i2400m_rx_roq_destroy()
933 __skb_queue_purge(&i2400m->rx_roq[itr].queue); in i2400m_rx_roq_destroy()
934 kfree(i2400m->rx_roq[0].log); in i2400m_rx_roq_destroy()
935 kfree(i2400m->rx_roq); in i2400m_rx_roq_destroy()
936 i2400m->rx_roq = NULL; in i2400m_rx_roq_destroy()
980 void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx, in i2400m_rx_edata() argument
983 struct device *dev = i2400m_dev(i2400m); in i2400m_rx_edata()
985 struct net_device *net_dev = i2400m->wimax_dev.net_dev; in i2400m_rx_edata()
997 "size %zu)\n", i2400m, skb_rx, single_last, payload, size); in i2400m_rx_edata()
1031 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_rx_edata()
1032 if (i2400m->rx_roq == NULL) { in i2400m_rx_edata()
1034 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx_edata()
1037 roq = &i2400m->rx_roq[ro_cin]; in i2400m_rx_edata()
1038 kref_get(&i2400m->rx_roq_refcount); in i2400m_rx_edata()
1039 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx_edata()
1051 i2400m_roq_reset(i2400m, roq); in i2400m_rx_edata()
1055 i2400m_roq_queue(i2400m, roq, skb, ro_sn); in i2400m_rx_edata()
1058 i2400m_roq_update_ws(i2400m, roq, ro_sn); in i2400m_rx_edata()
1062 i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn); in i2400m_rx_edata()
1068 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_rx_edata()
1069 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy); in i2400m_rx_edata()
1070 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx_edata()
1073 i2400m_net_erx(i2400m, skb, cs); in i2400m_rx_edata()
1077 "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size); in i2400m_rx_edata()
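
i2400m_rx_edata() pins the reorder-queue array with kref_get() under rx_lock before touching one of its queues and drops the reference when done, while i2400m_rx_roq_destroy() is the kref release callback that purges every per-queue skb list and frees the array once the last user lets go (the RX path here, or i2400m_rx_release() further down). A condensed sketch of that lifecycle; struct roq and struct roq_owner are simplified stand-ins, and RX_ROQ_COUNT stands in for the driver's I2400M_RO_CIN + 1.

/*
 * Sketch of the kref-managed reorder-queue array: users take a reference
 * under rx_lock before using a queue and drop it when done; the release
 * callback purges every per-queue skb list and frees the array.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define RX_ROQ_COUNT    16      /* assumed: stand-in for I2400M_RO_CIN + 1 */

struct roq {
        struct sk_buff_head queue;      /* held-back, out-of-order skbs */
        unsigned int ws;                /* window start */
};

struct roq_owner {                      /* stand-in for the relevant i2400m fields */
        spinlock_t rx_lock;
        struct roq *rx_roq;             /* NULL once torn down */
        struct kref rx_roq_refcount;
};

/* kref release callback (cf. i2400m_rx_roq_destroy()). */
static void roq_array_destroy(struct kref *ref)
{
        struct roq_owner *owner =
                container_of(ref, struct roq_owner, rx_roq_refcount);
        unsigned int itr;

        for (itr = 0; itr < RX_ROQ_COUNT; itr++)
                __skb_queue_purge(&owner->rx_roq[itr].queue);
        kfree(owner->rx_roq);
        owner->rx_roq = NULL;
}

/* Per-packet use (cf. i2400m_rx_edata()): pin the array, use it, unpin. */
static void roq_use_one(struct roq_owner *owner, unsigned int cin)
{
        struct roq *roq;
        unsigned long flags;

        spin_lock_irqsave(&owner->rx_lock, flags);
        if (owner->rx_roq == NULL) {            /* already torn down: drop packet */
                spin_unlock_irqrestore(&owner->rx_lock, flags);
                return;
        }
        roq = &owner->rx_roq[cin];
        kref_get(&owner->rx_roq_refcount);      /* keep the array alive while in use */
        spin_unlock_irqrestore(&owner->rx_lock, flags);

        /* ... reset / queue / update_ws against roq here ... */
        (void)roq;

        spin_lock_irqsave(&owner->rx_lock, flags);
        kref_put(&owner->rx_roq_refcount, roq_array_destroy);
        spin_unlock_irqrestore(&owner->rx_lock, flags);
}
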
1097 void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx, in i2400m_rx_payload() argument
1101 struct device *dev = i2400m_dev(i2400m); in i2400m_rx_payload()
1112 i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size); in i2400m_rx_payload()
1115 i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size); in i2400m_rx_payload()
1118 i2400m_rx_trace(i2400m, payload, pl_size); in i2400m_rx_payload()
1122 i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size); in i2400m_rx_payload()
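
i2400m_rx_payload() fans each payload out by type: network data to i2400m_net_rx(), control messages to i2400m_rx_ctl(), firmware traces to i2400m_rx_trace() and extended data to i2400m_rx_edata(). A compact sketch of that dispatch follows; the I2400M_PT_* values are redeclared here from memory of the driver's public header and should be checked against include/linux/wimax/i2400m.h, and the handler calls are left as comments since they need the full driver context.

/*
 * Sketch of the per-payload dispatch in i2400m_rx_payload(); the enum is
 * an assumed copy of the driver's payload types, handlers are elided.
 */
#include <linux/kernel.h>

enum i2400m_pt {                /* assumed payload-type values */
        I2400M_PT_DATA = 0,
        I2400M_PT_CTRL,
        I2400M_PT_TRACE,
        I2400M_PT_RESET_WARM,
        I2400M_PT_RESET_COLD,
        I2400M_PT_EDATA,
};

static void rx_payload_dispatch(enum i2400m_pt type, const void *payload,
                                size_t pl_size)
{
        switch (type) {
        case I2400M_PT_DATA:    /* plain network frame */
                /* i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size); */
                break;
        case I2400M_PT_CTRL:    /* control / ack message */
                /* i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size); */
                break;
        case I2400M_PT_TRACE:   /* firmware trace record */
                /* i2400m_rx_trace(i2400m, payload, pl_size); */
                break;
        case I2400M_PT_EDATA:   /* extended data, goes through the reorder queues */
                /* i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size); */
                break;
        default:                /* unknown type: warn and drop */
                pr_debug("unhandled payload type %d (%zu bytes at %p)\n",
                         type, pl_size, payload);
                break;
        }
}
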
1143 int i2400m_rx_msg_hdr_check(struct i2400m *i2400m, in i2400m_rx_msg_hdr_check() argument
1148 struct device *dev = i2400m_dev(i2400m); in i2400m_rx_msg_hdr_check()
1188 int i2400m_rx_pl_descr_check(struct i2400m *i2400m, in i2400m_rx_pl_descr_check() argument
1193 struct device *dev = i2400m_dev(i2400m); in i2400m_rx_pl_descr_check()
1197 if (pl_size > i2400m->bus_pl_size_max) { in i2400m_rx_pl_descr_check()
1200 pl_itr, pl_size, i2400m->bus_pl_size_max); in i2400m_rx_pl_descr_check()
1244 int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) in i2400m_rx() argument
1247 struct device *dev = i2400m_dev(i2400m); in i2400m_rx()
1255 i2400m, skb, skb_len); in i2400m_rx()
1257 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len); in i2400m_rx()
1275 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i], in i2400m_rx()
1280 i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i], in i2400m_rx()
1287 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_rx()
1288 i2400m->rx_pl_num += i; in i2400m_rx()
1289 if (i > i2400m->rx_pl_max) in i2400m_rx()
1290 i2400m->rx_pl_max = i; in i2400m_rx()
1291 if (i < i2400m->rx_pl_min) in i2400m_rx()
1292 i2400m->rx_pl_min = i; in i2400m_rx()
1293 i2400m->rx_num++; in i2400m_rx()
1294 i2400m->rx_size_acc += skb_len; in i2400m_rx()
1295 if (skb_len < i2400m->rx_size_min) in i2400m_rx()
1296 i2400m->rx_size_min = skb_len; in i2400m_rx()
1297 if (skb_len > i2400m->rx_size_max) in i2400m_rx()
1298 i2400m->rx_size_max = skb_len; in i2400m_rx()
1299 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx()
1304 i2400m, skb, skb_len, result); in i2400m_rx()
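
The tail of i2400m_rx() updates the RX statistics (payloads per message, message count, accumulated and min/max message sizes) inside one short rx_lock critical section. The sketch below mirrors that accounting with a stand-in struct; note that the min counters only track correctly if they start out at a large value (e.g. UINT_MAX / SIZE_MAX), which is assumed to happen at init time elsewhere in the driver.

/*
 * Sketch of the statistics update at the end of i2400m_rx(); rx_stats is a
 * stand-in for counter fields kept directly in struct i2400m.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct rx_stats {
        spinlock_t rx_lock;
        unsigned int rx_pl_num, rx_pl_min, rx_pl_max;   /* payloads per message */
        unsigned int rx_num;                            /* messages received */
        size_t rx_size_acc, rx_size_min, rx_size_max;   /* message sizes */
};

static void rx_account(struct rx_stats *st, unsigned int num_pls,
                       size_t skb_len)
{
        unsigned long flags;

        spin_lock_irqsave(&st->rx_lock, flags);
        st->rx_pl_num += num_pls;
        if (num_pls > st->rx_pl_max)
                st->rx_pl_max = num_pls;
        if (num_pls < st->rx_pl_min)
                st->rx_pl_min = num_pls;
        st->rx_num++;
        st->rx_size_acc += skb_len;
        if (skb_len < st->rx_size_min)
                st->rx_size_min = skb_len;
        if (skb_len > st->rx_size_max)
                st->rx_size_max = skb_len;
        spin_unlock_irqrestore(&st->rx_lock, flags);
}
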
1310 void i2400m_unknown_barker(struct i2400m *i2400m, in i2400m_unknown_barker() argument
1313 struct device *dev = i2400m_dev(i2400m); in i2400m_unknown_barker()
1345 int i2400m_rx_setup(struct i2400m *i2400m) in i2400m_rx_setup() argument
1349 i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1; in i2400m_rx_setup()
1350 if (i2400m->rx_reorder) { in i2400m_rx_setup()
1356 i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1, in i2400m_rx_setup()
1357 sizeof(i2400m->rx_roq[0]), GFP_KERNEL); in i2400m_rx_setup()
1358 if (i2400m->rx_roq == NULL) in i2400m_rx_setup()
1361 rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log), in i2400m_rx_setup()
1369 __i2400m_roq_init(&i2400m->rx_roq[itr]); in i2400m_rx_setup()
1370 i2400m->rx_roq[itr].log = &rd[itr]; in i2400m_rx_setup()
1372 kref_init(&i2400m->rx_roq_refcount); in i2400m_rx_setup()
1377 kfree(i2400m->rx_roq); in i2400m_rx_setup()
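
i2400m_rx_setup() only builds the reorder machinery when reordering is enabled: one kcalloc() for the I2400M_RO_CIN + 1 queues, one for their log buffers, a loop wiring each queue to its log slot, then kref_init(); if the log allocation fails the queue array is freed again (the kfree visible just above) and -ENOMEM is returned. A condensed sketch under those assumptions; the queue and log types, the per-queue init and RX_ROQ_COUNT are simplified stand-ins.

/*
 * Sketch of the reorder-queue setup (cf. i2400m_rx_setup()): one kcalloc()
 * for the queues, one for their log buffers, goto-based unwind on failure.
 */
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#define RX_ROQ_COUNT    16      /* assumed: stand-in for I2400M_RO_CIN + 1 */

struct roq_log {                /* per-queue event log (contents elided) */
        unsigned int head, tail;
};

struct roq {
        struct sk_buff_head queue;
        unsigned int ws;
        struct roq_log *log;
};

struct roq_owner {
        struct roq *rx_roq;
        struct kref rx_roq_refcount;
        unsigned int rx_reorder;
};

static int rx_roq_setup(struct roq_owner *owner, bool reorder_enabled)
{
        struct roq_log *rd;
        unsigned int itr;

        owner->rx_reorder = reorder_enabled;
        if (!owner->rx_reorder)
                return 0;                       /* nothing to allocate */

        owner->rx_roq = kcalloc(RX_ROQ_COUNT, sizeof(owner->rx_roq[0]),
                                GFP_KERNEL);
        if (owner->rx_roq == NULL)
                return -ENOMEM;

        rd = kcalloc(RX_ROQ_COUNT, sizeof(*rd), GFP_KERNEL);
        if (rd == NULL)
                goto error_log_alloc;

        for (itr = 0; itr < RX_ROQ_COUNT; itr++) {
                skb_queue_head_init(&owner->rx_roq[itr].queue);
                owner->rx_roq[itr].ws = 0;
                owner->rx_roq[itr].log = &rd[itr];      /* one shared allocation */
        }
        kref_init(&owner->rx_roq_refcount);
        return 0;

error_log_alloc:
        kfree(owner->rx_roq);
        owner->rx_roq = NULL;
        return -ENOMEM;
}
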
1384 void i2400m_rx_release(struct i2400m *i2400m) in i2400m_rx_release() argument
1388 if (i2400m->rx_reorder) { in i2400m_rx_release()
1389 spin_lock_irqsave(&i2400m->rx_lock, flags); in i2400m_rx_release()
1390 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy); in i2400m_rx_release()
1391 spin_unlock_irqrestore(&i2400m->rx_lock, flags); in i2400m_rx_release()
1394 i2400m_report_hook_flush(i2400m); in i2400m_rx_release()