1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2009, Microsoft Corporation.
4 *
5 * Authors:
6 * Haiyang Zhang <haiyangz@microsoft.com>
7 * Hank Janssen <hjanssen@microsoft.com>
8 */
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/wait.h>
12 #include <linux/highmem.h>
13 #include <linux/slab.h>
14 #include <linux/io.h>
15 #include <linux/if_ether.h>
16 #include <linux/netdevice.h>
17 #include <linux/if_vlan.h>
18 #include <linux/nls.h>
19 #include <linux/vmalloc.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/ucs2_string.h>
22
23 #include "hyperv_net.h"
24 #include "netvsc_trace.h"
25
26 static void rndis_set_multicast(struct work_struct *w);
27
28 #define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
29 struct rndis_request {
30 struct list_head list_ent;
31 struct completion wait_event;
32
33 struct rndis_message response_msg;
34 /*
35 * The buffer for extended info after the RNDIS response message. It's
36 * referenced based on the data offset in the RNDIS message. Its size
37 * is enough for current needs, and should be sufficient for the near
38 * future.
39 */
40 u8 response_ext[RNDIS_EXT_LEN];
41
42 /* Simplify allocation by having a netvsc packet inline */
43 struct hv_netvsc_packet pkt;
44
45 struct rndis_message request_msg;
46 /*
47 * The buffer for the extended info after the RNDIS request message.
48 * It is referenced and sized in a similar way as response_ext.
49 */
50 u8 request_ext[RNDIS_EXT_LEN];
51 };
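/*
 * Layout note: response_ext is placed right after response_msg, and
 * request_ext right after request_msg, so extended info that the host
 * references by an offset from the start of the RNDIS message lands in
 * the inline buffer as long as msg_len stays within
 * sizeof(struct rndis_message) + RNDIS_EXT_LEN (exactly the bound checked
 * in rndis_filter_receive_response() below).
 */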
52
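/*
 * Default 40-byte Toeplitz hash key, programmed into the host by
 * rndis_set_subchannel() below when the caller does not supply its own
 * RSS key.
 */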
53 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
54 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
55 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
56 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
57 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
58 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
59 };
60
61 static struct rndis_device *get_rndis_device(void)
62 {
63 struct rndis_device *device;
64
65 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
66 if (!device)
67 return NULL;
68
69 spin_lock_init(&device->request_lock);
70
71 INIT_LIST_HEAD(&device->req_list);
72 INIT_WORK(&device->mcast_work, rndis_set_multicast);
73
74 device->state = RNDIS_DEV_UNINITIALIZED;
75
76 return device;
77 }
78
79 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
80 u32 msg_type,
81 u32 msg_len)
82 {
83 struct rndis_request *request;
84 struct rndis_message *rndis_msg;
85 struct rndis_set_request *set;
86 unsigned long flags;
87
88 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
89 if (!request)
90 return NULL;
91
92 init_completion(&request->wait_event);
93
94 rndis_msg = &request->request_msg;
95 rndis_msg->ndis_msg_type = msg_type;
96 rndis_msg->msg_len = msg_len;
97
98 request->pkt.q_idx = 0;
99
100 /*
101 * Set the request id. This field is always after the rndis header for
102 * request/response packet types, so we just use the SetRequest as a
103 * template.
104 */
105 set = &rndis_msg->msg.set_req;
106 set->req_id = atomic_inc_return(&dev->new_req_id);
107
108 /* Add to the request list */
109 spin_lock_irqsave(&dev->request_lock, flags);
110 list_add_tail(&request->list_ent, &dev->req_list);
111 spin_unlock_irqrestore(&dev->request_lock, flags);
112
113 return request;
114 }
115
116 static void put_rndis_request(struct rndis_device *dev,
117 struct rndis_request *req)
118 {
119 unsigned long flags;
120
121 spin_lock_irqsave(&dev->request_lock, flags);
122 list_del(&req->list_ent);
123 spin_unlock_irqrestore(&dev->request_lock, flags);
124
125 kfree(req);
126 }
127
128 static void dump_rndis_message(struct net_device *netdev,
129 const struct rndis_message *rndis_msg)
130 {
131 switch (rndis_msg->ndis_msg_type) {
132 case RNDIS_MSG_PACKET:
133 netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
134 "data offset %u data len %u, # oob %u, "
135 "oob offset %u, oob len %u, pkt offset %u, "
136 "pkt len %u\n",
137 rndis_msg->msg_len,
138 rndis_msg->msg.pkt.data_offset,
139 rndis_msg->msg.pkt.data_len,
140 rndis_msg->msg.pkt.num_oob_data_elements,
141 rndis_msg->msg.pkt.oob_data_offset,
142 rndis_msg->msg.pkt.oob_data_len,
143 rndis_msg->msg.pkt.per_pkt_info_offset,
144 rndis_msg->msg.pkt.per_pkt_info_len);
145 break;
146
147 case RNDIS_MSG_INIT_C:
148 netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
149 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
150 "device flags %d, max xfer size 0x%x, max pkts %u, "
151 "pkt aligned %u)\n",
152 rndis_msg->msg_len,
153 rndis_msg->msg.init_complete.req_id,
154 rndis_msg->msg.init_complete.status,
155 rndis_msg->msg.init_complete.major_ver,
156 rndis_msg->msg.init_complete.minor_ver,
157 rndis_msg->msg.init_complete.dev_flags,
158 rndis_msg->msg.init_complete.max_xfer_size,
159 rndis_msg->msg.init_complete.
160 max_pkt_per_msg,
161 rndis_msg->msg.init_complete.
162 pkt_alignment_factor);
163 break;
164
165 case RNDIS_MSG_QUERY_C:
166 netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
167 "(len %u, id 0x%x, status 0x%x, buf len %u, "
168 "buf offset %u)\n",
169 rndis_msg->msg_len,
170 rndis_msg->msg.query_complete.req_id,
171 rndis_msg->msg.query_complete.status,
172 rndis_msg->msg.query_complete.
173 info_buflen,
174 rndis_msg->msg.query_complete.
175 info_buf_offset);
176 break;
177
178 case RNDIS_MSG_SET_C:
179 netdev_dbg(netdev,
180 "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
181 rndis_msg->msg_len,
182 rndis_msg->msg.set_complete.req_id,
183 rndis_msg->msg.set_complete.status);
184 break;
185
186 case RNDIS_MSG_INDICATE:
187 netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
188 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
189 rndis_msg->msg_len,
190 rndis_msg->msg.indicate_status.status,
191 rndis_msg->msg.indicate_status.status_buflen,
192 rndis_msg->msg.indicate_status.status_buf_offset);
193 break;
194
195 default:
196 netdev_dbg(netdev, "0x%x (len %u)\n",
197 rndis_msg->ndis_msg_type,
198 rndis_msg->msg_len);
199 break;
200 }
201 }
202
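/*
 * The request message is sent to the host as a gather list of one or two
 * hv_page_buffer entries.  Worked example with made-up numbers: if
 * request_msg starts at offset 0xf80 within a 4K hypervisor page and
 * msg_len is 0x100, then 0xf80 + 0x100 > HV_HYP_PAGE_SIZE, so the code
 * below splits it into pb[0] = { offset 0xf80, len 0x80 } on the first
 * page and pb[1] = { offset 0, len 0x80 } on the following page.
 */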
203 static int rndis_filter_send_request(struct rndis_device *dev,
204 struct rndis_request *req)
205 {
206 struct hv_netvsc_packet *packet;
207 struct hv_page_buffer page_buf[2];
208 struct hv_page_buffer *pb = page_buf;
209 int ret;
210
211 /* Setup the packet to send it */
212 packet = &req->pkt;
213
214 packet->total_data_buflen = req->request_msg.msg_len;
215 packet->page_buf_cnt = 1;
216
217 pb[0].pfn = virt_to_phys(&req->request_msg) >>
218 HV_HYP_PAGE_SHIFT;
219 pb[0].len = req->request_msg.msg_len;
220 pb[0].offset = offset_in_hvpage(&req->request_msg);
221
222 /* Add one more page_buf when request_msg crosses the page boundary */
223 if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
224 packet->page_buf_cnt++;
225 pb[0].len = HV_HYP_PAGE_SIZE -
226 pb[0].offset;
227 pb[1].pfn = virt_to_phys((void *)&req->request_msg
228 + pb[0].len) >> HV_HYP_PAGE_SHIFT;
229 pb[1].offset = 0;
230 pb[1].len = req->request_msg.msg_len -
231 pb[0].len;
232 }
233
234 trace_rndis_send(dev->ndev, 0, &req->request_msg);
235
236 rcu_read_lock_bh();
237 ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
238 rcu_read_unlock_bh();
239
240 return ret;
241 }
242
243 static void rndis_set_link_state(struct rndis_device *rdev,
244 struct rndis_request *request)
245 {
246 u32 link_status;
247 struct rndis_query_complete *query_complete;
248
249 query_complete = &request->response_msg.msg.query_complete;
250
251 if (query_complete->status == RNDIS_STATUS_SUCCESS &&
252 query_complete->info_buflen == sizeof(u32)) {
253 memcpy(&link_status, (void *)((unsigned long)query_complete +
254 query_complete->info_buf_offset), sizeof(u32));
255 rdev->link_state = link_status != 0;
256 }
257 }
258
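/*
 * Completion path for control requests: the sender allocates a request
 * with get_rndis_request() (which assigns a unique req_id and links it on
 * dev->req_list) and then blocks in wait_for_completion().  This function
 * matches the incoming completion against the pending list by req_id,
 * copies the response into request->response_msg (with RNDIS_EXT_LEN of
 * extra room), and wakes the waiter.
 */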
259 static void rndis_filter_receive_response(struct net_device *ndev,
260 struct netvsc_device *nvdev,
261 const struct rndis_message *resp)
262 {
263 struct rndis_device *dev = nvdev->extension;
264 struct rndis_request *request = NULL;
265 bool found = false;
266 unsigned long flags;
267
268 /* This should never happen; it means a control message
269 * response was received after the device was removed.
270 */
271 if (dev->state == RNDIS_DEV_UNINITIALIZED) {
272 netdev_err(ndev,
273 "got rndis message uninitialized\n");
274 return;
275 }
276
277 /* Ensure the packet is big enough to read req_id. Req_id is the 1st
278 * field in any request/response message, so the payload should have at
279 * least sizeof(u32) bytes
280 */
281 if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
282 netdev_err(ndev, "rndis msg_len too small: %u\n",
283 resp->msg_len);
284 return;
285 }
286
287 spin_lock_irqsave(&dev->request_lock, flags);
288 list_for_each_entry(request, &dev->req_list, list_ent) {
289 /*
290 * All request/response messages contain the RequestId as the
291 * 1st field
292 */
293 if (request->request_msg.msg.init_req.req_id
294 == resp->msg.init_complete.req_id) {
295 found = true;
296 break;
297 }
298 }
299 spin_unlock_irqrestore(&dev->request_lock, flags);
300
301 if (found) {
302 if (resp->msg_len <=
303 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
304 memcpy(&request->response_msg, resp,
305 resp->msg_len);
306 if (request->request_msg.ndis_msg_type ==
307 RNDIS_MSG_QUERY && request->request_msg.msg.
308 query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
309 rndis_set_link_state(dev, request);
310 } else {
311 netdev_err(ndev,
312 "rndis response buffer overflow "
313 "detected (size %u max %zu)\n",
314 resp->msg_len,
315 sizeof(struct rndis_message));
316
317 if (resp->ndis_msg_type ==
318 RNDIS_MSG_RESET_C) {
319 /* does not have a request id field */
320 request->response_msg.msg.reset_complete.
321 status = RNDIS_STATUS_BUFFER_OVERFLOW;
322 } else {
323 request->response_msg.msg.
324 init_complete.status =
325 RNDIS_STATUS_BUFFER_OVERFLOW;
326 }
327 }
328
329 complete(&request->wait_event);
330 } else {
331 netdev_err(ndev,
332 "no rndis request found for this response "
333 "(id 0x%x res type 0x%x)\n",
334 resp->msg.init_complete.req_id,
335 resp->ndis_msg_type);
336 }
337 }
338
339 /*
340 * Get the Per-Packet-Info with the specified type
341 * return NULL if not found.
342 */
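/*
 * The per-packet-info area starts per_pkt_info_offset bytes from the
 * beginning of struct rndis_packet and is per_pkt_info_len bytes long.
 * It holds back-to-back rndis_per_packet_info records; each record gives
 * its total size, a type (plus an "internal" flag), and ppi_offset, the
 * offset of its payload from the start of the record.  The walk below
 * steps from one record to the next by adding ppi->size.
 */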
343 static inline void *rndis_get_ppi(struct net_device *ndev,
344 struct rndis_packet *rpkt,
345 u32 rpkt_len, u32 type, u8 internal)
346 {
347 struct rndis_per_packet_info *ppi;
348 int len;
349
350 if (rpkt->per_pkt_info_offset == 0)
351 return NULL;
352
353 /* Validate info_offset and info_len */
354 if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
355 rpkt->per_pkt_info_offset > rpkt_len) {
356 netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
357 rpkt->per_pkt_info_offset);
358 return NULL;
359 }
360
361 if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
362 netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
363 rpkt->per_pkt_info_len);
364 return NULL;
365 }
366
367 ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
368 rpkt->per_pkt_info_offset);
369 len = rpkt->per_pkt_info_len;
370
371 while (len > 0) {
372 /* Validate ppi_offset and ppi_size */
373 if (ppi->size > len) {
374 netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
375 break; /* continuing would loop forever: len and ppi do not advance */
376 }
377 
378 if (ppi->ppi_offset >= ppi->size) {
379 netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
380 break;
381 }
382
383 if (ppi->type == type && ppi->internal == internal)
384 return (void *)((ulong)ppi + ppi->ppi_offset);
385 len -= ppi->size;
386 ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
387 }
388
389 return NULL;
390 }
391
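/*
 * Accumulate one receive segment coalescing (RSC) fragment into the per
 * channel nvchan->rsc descriptor.  The first fragment (cnt == 0) latches
 * the vlan/checksum/hash metadata and starts pktlen; later fragments only
 * append their data pointer and length.  netvsc_recv_callback() consumes
 * the whole descriptor once the final fragment has been added.
 */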
392 static inline
393 void rsc_add_data(struct netvsc_channel *nvchan,
394 const struct ndis_pkt_8021q_info *vlan,
395 const struct ndis_tcp_ip_checksum_info *csum_info,
396 const u32 *hash_info,
397 void *data, u32 len)
398 {
399 u32 cnt = nvchan->rsc.cnt;
400
401 if (cnt) {
402 nvchan->rsc.pktlen += len;
403 } else {
404 nvchan->rsc.vlan = vlan;
405 nvchan->rsc.csum_info = csum_info;
406 nvchan->rsc.pktlen = len;
407 nvchan->rsc.hash_info = hash_info;
408 }
409
410 nvchan->rsc.data[cnt] = data;
411 nvchan->rsc.len[cnt] = len;
412 nvchan->rsc.cnt++;
413 }
414
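/*
 * Receive path for RNDIS_MSG_PACKET.  rndis_pkt->data_offset is counted
 * from just past the generic RNDIS header, so the payload is found at
 * data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset, and
 * rpkt_len = data_buflen - RNDIS_HEADER_SIZE bounds the per-packet-info
 * parsing.  Anything beyond rndis_pkt->data_len is trailer padding and is
 * not passed up the stack.
 */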
415 static int rndis_filter_receive_data(struct net_device *ndev,
416 struct netvsc_device *nvdev,
417 struct netvsc_channel *nvchan,
418 struct rndis_message *msg,
419 u32 data_buflen)
420 {
421 struct rndis_packet *rndis_pkt = &msg->msg.pkt;
422 const struct ndis_tcp_ip_checksum_info *csum_info;
423 const struct ndis_pkt_8021q_info *vlan;
424 const struct rndis_pktinfo_id *pktinfo_id;
425 const u32 *hash_info;
426 u32 data_offset, rpkt_len;
427 void *data;
428 bool rsc_more = false;
429 int ret;
430
431 /* Ensure data_buflen is big enough to read header fields */
432 if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
433 netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
434 data_buflen);
435 return NVSP_STAT_FAIL;
436 }
437
438 /* Validate rndis_pkt offset */
439 if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
440 netdev_err(ndev, "invalid rndis packet offset: %u\n",
441 rndis_pkt->data_offset);
442 return NVSP_STAT_FAIL;
443 }
444
445 /* Remove the rndis header and pass it back up the stack */
446 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
447
448 rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
449 data_buflen -= data_offset;
450
451 /*
452 * Make sure we got a valid RNDIS message: the remaining buffer
453 * must cover at least the data packet size; the rest is trailer padding
454 */
455 if (unlikely(data_buflen < rndis_pkt->data_len)) {
456 netdev_err(ndev, "rndis message buffer "
457 "overflow detected (got %u, min %u)"
458 "...dropping this message!\n",
459 data_buflen, rndis_pkt->data_len);
460 return NVSP_STAT_FAIL;
461 }
462
463 vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
464
465 csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
466
467 hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
468
469 pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
470
471 data = (void *)msg + data_offset;
472
473 /* Identify RSC frags, drop erroneous packets */
474 if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
475 if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
476 nvchan->rsc.cnt = 0;
477 else if (nvchan->rsc.cnt == 0)
478 goto drop;
479
480 rsc_more = true;
481
482 if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
483 rsc_more = false;
484
485 if (rsc_more && nvchan->rsc.is_last)
486 goto drop;
487 } else {
488 nvchan->rsc.cnt = 0;
489 }
490
491 if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
492 goto drop;
493
494 /* Put data into the per channel structure.
495 * Also, strip the rndis trailer padding from the rndis packet:
496 * rndis_pkt->data_len tells us the real data length, so we only copy
497 * the data packet to the stack, without the rndis trailer padding.
498 */
499 rsc_add_data(nvchan, vlan, csum_info, hash_info,
500 data, rndis_pkt->data_len);
501
502 if (rsc_more)
503 return NVSP_STAT_SUCCESS;
504
505 ret = netvsc_recv_callback(ndev, nvdev, nvchan);
506 nvchan->rsc.cnt = 0;
507
508 return ret;
509
510 drop:
511 /* Drop incomplete packet */
512 nvchan->rsc.cnt = 0;
513 return NVSP_STAT_FAIL;
514 }
515
516 int rndis_filter_receive(struct net_device *ndev,
517 struct netvsc_device *net_dev,
518 struct netvsc_channel *nvchan,
519 void *data, u32 buflen)
520 {
521 struct net_device_context *net_device_ctx = netdev_priv(ndev);
522 struct rndis_message *rndis_msg = data;
523
524 if (netif_msg_rx_status(net_device_ctx))
525 dump_rndis_message(ndev, rndis_msg);
526
527 /* Validate incoming rndis_message packet */
528 if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
529 buflen < rndis_msg->msg_len) {
530 netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
531 buflen, rndis_msg->msg_len);
532 return NVSP_STAT_FAIL;
533 }
534
535 switch (rndis_msg->ndis_msg_type) {
536 case RNDIS_MSG_PACKET:
537 return rndis_filter_receive_data(ndev, net_dev, nvchan,
538 rndis_msg, buflen);
539 case RNDIS_MSG_INIT_C:
540 case RNDIS_MSG_QUERY_C:
541 case RNDIS_MSG_SET_C:
542 /* completion msgs */
543 rndis_filter_receive_response(ndev, net_dev, rndis_msg);
544 break;
545
546 case RNDIS_MSG_INDICATE:
547 /* notification msgs */
548 netvsc_linkstatus_callback(ndev, rndis_msg);
549 break;
550 default:
551 netdev_err(ndev,
552 "unhandled rndis message (type %u len %u)\n",
553 rndis_msg->ndis_msg_type,
554 rndis_msg->msg_len);
555 return NVSP_STAT_FAIL;
556 }
557
558 return NVSP_STAT_SUCCESS;
559 }
560
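/*
 * Synchronous OID query helper: build an RNDIS_MSG_QUERY, send it, sleep
 * on the request's completion, then copy the returned info buffer into
 * "result" (capped by the caller-supplied *result_size).  Two OIDs need a
 * pre-initialized NDIS object header in the request's info buffer, which
 * is what the OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES and
 * OID_GEN_RECEIVE_SCALE_CAPABILITIES branches below set up.  See
 * rndis_filter_query_device_link_status() further down for a minimal
 * caller that queries a single u32.
 */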
561 static int rndis_filter_query_device(struct rndis_device *dev,
562 struct netvsc_device *nvdev,
563 u32 oid, void *result, u32 *result_size)
564 {
565 struct rndis_request *request;
566 u32 inresult_size = *result_size;
567 struct rndis_query_request *query;
568 struct rndis_query_complete *query_complete;
569 int ret = 0;
570
571 if (!result)
572 return -EINVAL;
573
574 *result_size = 0;
575 request = get_rndis_request(dev, RNDIS_MSG_QUERY,
576 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
577 if (!request) {
578 ret = -ENOMEM;
579 goto cleanup;
580 }
581
582 /* Setup the rndis query */
583 query = &request->request_msg.msg.query_req;
584 query->oid = oid;
585 query->info_buf_offset = sizeof(struct rndis_query_request);
586 query->info_buflen = 0;
587 query->dev_vc_handle = 0;
588
589 if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
590 struct ndis_offload *hwcaps;
591 u32 nvsp_version = nvdev->nvsp_version;
592 u8 ndis_rev;
593 size_t size;
594
595 if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
596 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
597 size = NDIS_OFFLOAD_SIZE;
598 } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
599 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
600 size = NDIS_OFFLOAD_SIZE_6_1;
601 } else {
602 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
603 size = NDIS_OFFLOAD_SIZE_6_0;
604 }
605
606 request->request_msg.msg_len += size;
607 query->info_buflen = size;
608 hwcaps = (struct ndis_offload *)
609 ((unsigned long)query + query->info_buf_offset);
610
611 hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
612 hwcaps->header.revision = ndis_rev;
613 hwcaps->header.size = size;
614
615 } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
616 struct ndis_recv_scale_cap *cap;
617
618 request->request_msg.msg_len +=
619 sizeof(struct ndis_recv_scale_cap);
620 query->info_buflen = sizeof(struct ndis_recv_scale_cap);
621 cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
622 query->info_buf_offset);
623 cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
624 cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
625 cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
626 }
627
628 ret = rndis_filter_send_request(dev, request);
629 if (ret != 0)
630 goto cleanup;
631
632 wait_for_completion(&request->wait_event);
633
634 /* Copy the response back */
635 query_complete = &request->response_msg.msg.query_complete;
636
637 if (query_complete->info_buflen > inresult_size) {
638 ret = -1;
639 goto cleanup;
640 }
641
642 memcpy(result,
643 (void *)((unsigned long)query_complete +
644 query_complete->info_buf_offset),
645 query_complete->info_buflen);
646
647 *result_size = query_complete->info_buflen;
648
649 cleanup:
650 if (request)
651 put_rndis_request(dev, request);
652
653 return ret;
654 }
655
656 /* Get the hardware offload capabilities */
657 static int
658 rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
659 struct ndis_offload *caps)
660 {
661 u32 caps_len = sizeof(*caps);
662 int ret;
663
664 memset(caps, 0, sizeof(*caps));
665
666 ret = rndis_filter_query_device(dev, net_device,
667 OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
668 caps, &caps_len);
669 if (ret)
670 return ret;
671
672 if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
673 netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
674 caps->header.type);
675 return -EINVAL;
676 }
677
678 if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
679 netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
680 caps->header.revision);
681 return -EINVAL;
682 }
683
684 if (caps->header.size > caps_len ||
685 caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
686 netdev_warn(dev->ndev,
687 "invalid NDIS objsize %u, data size %u\n",
688 caps->header.size, caps_len);
689 return -EINVAL;
690 }
691
692 return 0;
693 }
694
695 static int rndis_filter_query_device_mac(struct rndis_device *dev,
696 struct netvsc_device *net_device)
697 {
698 u32 size = ETH_ALEN;
699
700 return rndis_filter_query_device(dev, net_device,
701 RNDIS_OID_802_3_PERMANENT_ADDRESS,
702 dev->hw_mac_adr, &size);
703 }
704
705 #define NWADR_STR "NetworkAddress"
706 #define NWADR_STRLEN 14
707
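/*
 * The MAC override is sent as an RNDIS config parameter whose name and
 * value are both UTF-16 strings.  The extra buffer size therefore works
 * out to sizeof(struct rndis_config_parameter_info) + 2 * NWADR_STRLEN
 * (the 14-character "NetworkAddress" name, 2 bytes per character) +
 * 4 * ETH_ALEN (each of the 6 MAC bytes becomes 2 hex characters of
 * 2 bytes each), matching the extlen computation below.
 */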
708 int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
709 const char *mac)
710 {
711 struct rndis_device *rdev = nvdev->extension;
712 struct rndis_request *request;
713 struct rndis_set_request *set;
714 struct rndis_config_parameter_info *cpi;
715 wchar_t *cfg_nwadr, *cfg_mac;
716 struct rndis_set_complete *set_complete;
717 char macstr[2*ETH_ALEN+1];
718 u32 extlen = sizeof(struct rndis_config_parameter_info) +
719 2*NWADR_STRLEN + 4*ETH_ALEN;
720 int ret;
721
722 request = get_rndis_request(rdev, RNDIS_MSG_SET,
723 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
724 if (!request)
725 return -ENOMEM;
726
727 set = &request->request_msg.msg.set_req;
728 set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
729 set->info_buflen = extlen;
730 set->info_buf_offset = sizeof(struct rndis_set_request);
731 set->dev_vc_handle = 0;
732
733 cpi = (struct rndis_config_parameter_info *)((ulong)set +
734 set->info_buf_offset);
735 cpi->parameter_name_offset =
736 sizeof(struct rndis_config_parameter_info);
737 /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
738 cpi->parameter_name_length = 2*NWADR_STRLEN;
739 cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
740 cpi->parameter_value_offset =
741 cpi->parameter_name_offset + cpi->parameter_name_length;
742 /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
743 cpi->parameter_value_length = 4*ETH_ALEN;
744
745 cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
746 cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
747 ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
748 cfg_nwadr, NWADR_STRLEN);
749 if (ret < 0)
750 goto cleanup;
751 snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
752 ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
753 cfg_mac, 2*ETH_ALEN);
754 if (ret < 0)
755 goto cleanup;
756
757 ret = rndis_filter_send_request(rdev, request);
758 if (ret != 0)
759 goto cleanup;
760
761 wait_for_completion(&request->wait_event);
762
763 set_complete = &request->response_msg.msg.set_complete;
764 if (set_complete->status != RNDIS_STATUS_SUCCESS)
765 ret = -EIO;
766
767 cleanup:
768 put_rndis_request(rdev, request);
769 return ret;
770 }
771
772 int
773 rndis_filter_set_offload_params(struct net_device *ndev,
774 struct netvsc_device *nvdev,
775 struct ndis_offload_params *req_offloads)
776 {
777 struct rndis_device *rdev = nvdev->extension;
778 struct rndis_request *request;
779 struct rndis_set_request *set;
780 struct ndis_offload_params *offload_params;
781 struct rndis_set_complete *set_complete;
782 u32 extlen = sizeof(struct ndis_offload_params);
783 int ret;
784 u32 vsp_version = nvdev->nvsp_version;
785
786 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
787 extlen = VERSION_4_OFFLOAD_SIZE;
788 /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
789 * UDP checksum offload.
790 */
791 req_offloads->udp_ip_v4_csum = 0;
792 req_offloads->udp_ip_v6_csum = 0;
793 }
794
795 request = get_rndis_request(rdev, RNDIS_MSG_SET,
796 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
797 if (!request)
798 return -ENOMEM;
799
800 set = &request->request_msg.msg.set_req;
801 set->oid = OID_TCP_OFFLOAD_PARAMETERS;
802 set->info_buflen = extlen;
803 set->info_buf_offset = sizeof(struct rndis_set_request);
804 set->dev_vc_handle = 0;
805
806 offload_params = (struct ndis_offload_params *)((ulong)set +
807 set->info_buf_offset);
808 *offload_params = *req_offloads;
809 offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
810 offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
811 offload_params->header.size = extlen;
812
813 ret = rndis_filter_send_request(rdev, request);
814 if (ret != 0)
815 goto cleanup;
816
817 wait_for_completion(&request->wait_event);
818 set_complete = &request->response_msg.msg.set_complete;
819 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
820 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
821 set_complete->status);
822 ret = -EINVAL;
823 }
824
825 cleanup:
826 put_rndis_request(rdev, request);
827 return ret;
828 }
829
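/*
 * The RSS parameter blob laid out below is, in order: the rndis set
 * request, then struct ndis_recv_scale_param at info_buf_offset, then the
 * indirection table (ITAB_NUM u32 entries, 4 * ITAB_NUM bytes) at
 * indirect_taboffset == sizeof(struct ndis_recv_scale_param), and finally
 * the NETVSC_HASH_KEYLEN-byte Toeplitz key at hashkey_offset ==
 * indirect_taboffset + indirect_tabsize.
 */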
830 static int rndis_set_rss_param_msg(struct rndis_device *rdev,
831 const u8 *rss_key, u16 flag)
832 {
833 struct net_device *ndev = rdev->ndev;
834 struct net_device_context *ndc = netdev_priv(ndev);
835 struct rndis_request *request;
836 struct rndis_set_request *set;
837 struct rndis_set_complete *set_complete;
838 u32 extlen = sizeof(struct ndis_recv_scale_param) +
839 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
840 struct ndis_recv_scale_param *rssp;
841 u32 *itab;
842 u8 *keyp;
843 int i, ret;
844
845 request = get_rndis_request(
846 rdev, RNDIS_MSG_SET,
847 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
848 if (!request)
849 return -ENOMEM;
850
851 set = &request->request_msg.msg.set_req;
852 set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
853 set->info_buflen = extlen;
854 set->info_buf_offset = sizeof(struct rndis_set_request);
855 set->dev_vc_handle = 0;
856
857 rssp = (struct ndis_recv_scale_param *)(set + 1);
858 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
859 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
860 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
861 rssp->flag = flag;
862 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
863 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
864 NDIS_HASH_TCP_IPV6;
865 rssp->indirect_tabsize = 4*ITAB_NUM;
866 rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
867 rssp->hashkey_size = NETVSC_HASH_KEYLEN;
868 rssp->hashkey_offset = rssp->indirect_taboffset +
869 rssp->indirect_tabsize;
870
871 /* Set indirection table entries */
872 itab = (u32 *)(rssp + 1);
873 for (i = 0; i < ITAB_NUM; i++)
874 itab[i] = ndc->rx_table[i];
875
876 /* Set hash key values */
877 keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
878 memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
879
880 ret = rndis_filter_send_request(rdev, request);
881 if (ret != 0)
882 goto cleanup;
883
884 wait_for_completion(&request->wait_event);
885 set_complete = &request->response_msg.msg.set_complete;
886 if (set_complete->status == RNDIS_STATUS_SUCCESS) {
887 if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
888 !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
889 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
890
891 } else {
892 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
893 set_complete->status);
894 ret = -EINVAL;
895 }
896
897 cleanup:
898 put_rndis_request(rdev, request);
899 return ret;
900 }
901
902 int rndis_filter_set_rss_param(struct rndis_device *rdev,
903 const u8 *rss_key)
904 {
905 /* Disable RSS before change */
906 rndis_set_rss_param_msg(rdev, rss_key,
907 NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
908
909 return rndis_set_rss_param_msg(rdev, rss_key, 0);
910 }
911
912 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
913 struct netvsc_device *net_device)
914 {
915 u32 size = sizeof(u32);
916 u32 link_status;
917
918 return rndis_filter_query_device(dev, net_device,
919 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
920 &link_status, &size);
921 }
922
923 static int rndis_filter_query_link_speed(struct rndis_device *dev,
924 struct netvsc_device *net_device)
925 {
926 u32 size = sizeof(u32);
927 u32 link_speed;
928 struct net_device_context *ndc;
929 int ret;
930
931 ret = rndis_filter_query_device(dev, net_device,
932 RNDIS_OID_GEN_LINK_SPEED,
933 &link_speed, &size);
934
935 if (!ret) {
936 ndc = netdev_priv(dev->ndev);
937
938 /* The link speed reported from host is in 100bps unit, so
939 * we convert it to Mbps here.
940 */
941 ndc->speed = link_speed / 10000;
942 }
943
944 return ret;
945 }
946
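/*
 * Program the host-side receive filter.  new_filter is a bitmask of
 * NDIS_PACKET_TYPE_* values sent as a single u32 info buffer; dev->filter
 * caches the last value programmed so that redundant requests (e.g. from
 * repeated rndis_set_multicast() runs) are skipped.
 */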
947 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
948 u32 new_filter)
949 {
950 struct rndis_request *request;
951 struct rndis_set_request *set;
952 int ret;
953
954 if (dev->filter == new_filter)
955 return 0;
956
957 request = get_rndis_request(dev, RNDIS_MSG_SET,
958 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
959 sizeof(u32));
960 if (!request)
961 return -ENOMEM;
962
963 /* Setup the rndis set */
964 set = &request->request_msg.msg.set_req;
965 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
966 set->info_buflen = sizeof(u32);
967 set->info_buf_offset = sizeof(struct rndis_set_request);
968
969 memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
970 &new_filter, sizeof(u32));
971
972 ret = rndis_filter_send_request(dev, request);
973 if (ret == 0) {
974 wait_for_completion(&request->wait_event);
975 dev->filter = new_filter;
976 }
977
978 put_rndis_request(dev, request);
979
980 return ret;
981 }
982
983 static void rndis_set_multicast(struct work_struct *w)
984 {
985 struct rndis_device *rdev
986 = container_of(w, struct rndis_device, mcast_work);
987 u32 filter = NDIS_PACKET_TYPE_DIRECTED;
988 unsigned int flags = rdev->ndev->flags;
989
990 if (flags & IFF_PROMISC) {
991 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
992 } else {
993 if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
994 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
995 if (flags & IFF_BROADCAST)
996 filter |= NDIS_PACKET_TYPE_BROADCAST;
997 }
998
999 rndis_filter_set_packet_filter(rdev, filter);
1000 }
1001
1002 void rndis_filter_update(struct netvsc_device *nvdev)
1003 {
1004 struct rndis_device *rdev = nvdev->extension;
1005
1006 schedule_work(&rdev->mcast_work);
1007 }
1008
1009 static int rndis_filter_init_device(struct rndis_device *dev,
1010 struct netvsc_device *nvdev)
1011 {
1012 struct rndis_request *request;
1013 struct rndis_initialize_request *init;
1014 struct rndis_initialize_complete *init_complete;
1015 u32 status;
1016 int ret;
1017
1018 request = get_rndis_request(dev, RNDIS_MSG_INIT,
1019 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
1020 if (!request) {
1021 ret = -ENOMEM;
1022 goto cleanup;
1023 }
1024
1025 /* Setup the rndis initialize request */
1026 init = &request->request_msg.msg.init_req;
1027 init->major_ver = RNDIS_MAJOR_VERSION;
1028 init->minor_ver = RNDIS_MINOR_VERSION;
1029 init->max_xfer_size = 0x4000;
1030
1031 dev->state = RNDIS_DEV_INITIALIZING;
1032
1033 ret = rndis_filter_send_request(dev, request);
1034 if (ret != 0) {
1035 dev->state = RNDIS_DEV_UNINITIALIZED;
1036 goto cleanup;
1037 }
1038
1039 wait_for_completion(&request->wait_event);
1040
1041 init_complete = &request->response_msg.msg.init_complete;
1042 status = init_complete->status;
1043 if (status == RNDIS_STATUS_SUCCESS) {
1044 dev->state = RNDIS_DEV_INITIALIZED;
1045 nvdev->max_pkt = init_complete->max_pkt_per_msg;
1046 nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
1047 ret = 0;
1048 } else {
1049 dev->state = RNDIS_DEV_UNINITIALIZED;
1050 ret = -EINVAL;
1051 }
1052
1053 cleanup:
1054 if (request)
1055 put_rndis_request(dev, request);
1056
1057 return ret;
1058 }
1059
1060 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
1061 {
1062 int i;
1063
1064 for (i = 0; i < nvdev->num_chn; i++) {
1065 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1066
1067 if (nvchan->mrc.first != nvchan->mrc.next)
1068 return false;
1069
1070 if (atomic_read(&nvchan->queue_sends) > 0)
1071 return false;
1072 }
1073
1074 return true;
1075 }
1076
1077 static void rndis_filter_halt_device(struct netvsc_device *nvdev,
1078 struct rndis_device *dev)
1079 {
1080 struct rndis_request *request;
1081 struct rndis_halt_request *halt;
1082
1083 /* Attempt to do a rndis device halt */
1084 request = get_rndis_request(dev, RNDIS_MSG_HALT,
1085 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
1086 if (!request)
1087 goto cleanup;
1088
1089 /* Setup the rndis halt request */
1090 halt = &request->request_msg.msg.halt_req;
1091 halt->req_id = atomic_inc_return(&dev->new_req_id);
1092
1093 /* Ignore return since this msg is optional. */
1094 rndis_filter_send_request(dev, request);
1095
1096 dev->state = RNDIS_DEV_UNINITIALIZED;
1097
1098 cleanup:
1099 nvdev->destroy = true;
1100
1101 /* Force flag to be ordered before waiting */
1102 wmb();
1103
1104 /* Wait for all send completions */
1105 wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
1106
1107 if (request)
1108 put_rndis_request(dev, request);
1109 }
1110
1111 static int rndis_filter_open_device(struct rndis_device *dev)
1112 {
1113 int ret;
1114
1115 if (dev->state != RNDIS_DEV_INITIALIZED)
1116 return 0;
1117
1118 ret = rndis_filter_set_packet_filter(dev,
1119 NDIS_PACKET_TYPE_BROADCAST |
1120 NDIS_PACKET_TYPE_ALL_MULTICAST |
1121 NDIS_PACKET_TYPE_DIRECTED);
1122 if (ret == 0)
1123 dev->state = RNDIS_DEV_DATAINITIALIZED;
1124
1125 return ret;
1126 }
1127
1128 static int rndis_filter_close_device(struct rndis_device *dev)
1129 {
1130 int ret;
1131
1132 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1133 return 0;
1134
1135 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1136 cancel_work_sync(&dev->mcast_work);
1137
1138 ret = rndis_filter_set_packet_filter(dev, 0);
1139 if (ret == -ENODEV)
1140 ret = 0;
1141
1142 if (ret == 0)
1143 dev->state = RNDIS_DEV_INITIALIZED;
1144
1145 return ret;
1146 }
1147
1148 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1149 {
1150 struct net_device *ndev =
1151 hv_get_drvdata(new_sc->primary_channel->device_obj);
1152 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1153 struct netvsc_device *nvscdev;
1154 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1155 struct netvsc_channel *nvchan;
1156 int ret;
1157
1158 /* This is safe because this callback only happens while the
1159 * new device is being set up and is waiting on channel_init_wait.
1160 */
1161 nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
1162 if (!nvscdev || chn_index >= nvscdev->num_chn)
1163 return;
1164
1165 nvchan = nvscdev->chan_table + chn_index;
1166
1167 /* Because the device uses NAPI, all the interrupt batching and
1168 * control is done via Net softirq, not the channel handling
1169 */
1170 set_channel_read_mode(new_sc, HV_CALL_ISR);
1171
1172 /* Set the channel before opening.*/
1173 nvchan->channel = new_sc;
1174
1175 ret = vmbus_open(new_sc, netvsc_ring_bytes,
1176 netvsc_ring_bytes, NULL, 0,
1177 netvsc_channel_cb, nvchan);
1178 if (ret == 0)
1179 napi_enable(&nvchan->napi);
1180 else
1181 netdev_notice(ndev, "sub channel open failed: %d\n", ret);
1182
1183 if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
1184 wake_up(&nvscdev->subchan_open);
1185 }
1186
1187 /* Open sub-channels after completing the handling of the device probe.
1188 * This breaks overlap of processing the host message for the
1189 * new primary channel with the initialization of sub-channels.
1190 */
1191 int rndis_set_subchannel(struct net_device *ndev,
1192 struct netvsc_device *nvdev,
1193 struct netvsc_device_info *dev_info)
1194 {
1195 struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1196 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1197 struct hv_device *hv_dev = ndev_ctx->device_ctx;
1198 struct rndis_device *rdev = nvdev->extension;
1199 int i, ret;
1200
1201 ASSERT_RTNL();
1202
1203 memset(init_packet, 0, sizeof(struct nvsp_message));
1204 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1205 init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1206 init_packet->msg.v5_msg.subchn_req.num_subchannels =
1207 nvdev->num_chn - 1;
1208 trace_nvsp_send(ndev, init_packet);
1209
1210 ret = vmbus_sendpacket(hv_dev->channel, init_packet,
1211 sizeof(struct nvsp_message),
1212 (unsigned long)init_packet,
1213 VM_PKT_DATA_INBAND,
1214 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1215 if (ret) {
1216 netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1217 return ret;
1218 }
1219
1220 wait_for_completion(&nvdev->channel_init_wait);
1221 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1222 netdev_err(ndev, "sub channel request failed\n");
1223 return -EIO;
1224 }
1225
1226 nvdev->num_chn = 1 +
1227 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1228
1229 /* wait for all sub channels to open */
1230 wait_event(nvdev->subchan_open,
1231 atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1232
1233 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1234 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1235
1236 /* ignore failures from setting rss parameters, still have channels */
1237 if (dev_info)
1238 rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1239 else
1240 rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1241
1242 netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1243 netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
1244
1245 return 0;
1246 }
1247
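/*
 * Translate the offload capabilities reported by the host (hwcaps) into
 * the netdev feature flags we advertise and into the offloads we ask the
 * host to enable via OID_TCP_OFFLOAD_PARAMETERS, clamping gso_max_size to
 * the host's LSOv2 limits along the way.
 */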
1248 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
1249 struct netvsc_device *nvdev)
1250 {
1251 struct net_device *net = rndis_device->ndev;
1252 struct net_device_context *net_device_ctx = netdev_priv(net);
1253 struct ndis_offload hwcaps;
1254 struct ndis_offload_params offloads;
1255 unsigned int gso_max_size = GSO_MAX_SIZE;
1256 int ret;
1257
1258 /* Find HW offload capabilities */
1259 ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
1260 if (ret != 0)
1261 return ret;
1262
1263 /* A value of zero means "no change"; now turn on what we want. */
1264 memset(&offloads, 0, sizeof(struct ndis_offload_params));
1265
1266 /* Linux always does IP header checksums in the kernel, so leave IP csum offload disabled */
1267 offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1268
1269 /* Reset previously set hw_features flags */
1270 net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
1271 net_device_ctx->tx_checksum_mask = 0;
1272
1273 /* Compute tx offload settings based on hw capabilities */
1274 net->hw_features |= NETIF_F_RXCSUM;
1275 net->hw_features |= NETIF_F_SG;
1276 net->hw_features |= NETIF_F_RXHASH;
1277
1278 if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1279 /* Can checksum TCP */
1280 net->hw_features |= NETIF_F_IP_CSUM;
1281 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1282
1283 offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1284
1285 if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1286 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1287 net->hw_features |= NETIF_F_TSO;
1288
1289 if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1290 gso_max_size = hwcaps.lsov2.ip4_maxsz;
1291 }
1292
1293 if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1294 offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1295 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1296 }
1297 }
1298
1299 if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1300 net->hw_features |= NETIF_F_IPV6_CSUM;
1301
1302 offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1303 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1304
1305 if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1306 (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1307 offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1308 net->hw_features |= NETIF_F_TSO6;
1309
1310 if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1311 gso_max_size = hwcaps.lsov2.ip6_maxsz;
1312 }
1313
1314 if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1315 offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1316 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1317 }
1318 }
1319
1320 if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
1321 net->hw_features |= NETIF_F_LRO;
1322
1323 if (net->features & NETIF_F_LRO) {
1324 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1325 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1326 } else {
1327 offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1328 offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1329 }
1330 }
1331
1332 /* In case some hw_features disappeared we need to remove them from
1333 * net->features list as they're no longer supported.
1334 */
1335 net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
1336
1337 netif_set_gso_max_size(net, gso_max_size);
1338
1339 ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
1340
1341 return ret;
1342 }
1343
1344 static void rndis_get_friendly_name(struct net_device *net,
1345 struct rndis_device *rndis_device,
1346 struct netvsc_device *net_device)
1347 {
1348 ucs2_char_t wname[256];
1349 unsigned long len;
1350 u8 ifalias[256];
1351 u32 size;
1352
1353 size = sizeof(wname);
1354 if (rndis_filter_query_device(rndis_device, net_device,
1355 RNDIS_OID_GEN_FRIENDLY_NAME,
1356 wname, &size) != 0)
1357 return; /* ignore if host does not support */
1358
1359 if (size == 0)
1360 return; /* name not set */
1361
1362 /* Convert Windows Unicode string to UTF-8 */
1363 len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
1364
1365 /* ignore the default value from host */
1366 if (strcmp(ifalias, "Network Adapter") != 0)
1367 dev_set_alias(net, ifalias, len);
1368 }
1369
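/*
 * Device bring-up sequence: create the netvsc channel, send the RNDIS
 * initialize message, query MTU and permanent MAC, push the offload
 * settings, read the link status, and, if the host supports vRSS
 * (NVSP >= 5 and more than one receive queue), size the channel table and
 * arm the sub-channel open callback.  Any multi-queue failure falls back
 * to a single channel rather than failing the probe.
 */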
1370 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1371 struct netvsc_device_info *device_info)
1372 {
1373 struct net_device *net = hv_get_drvdata(dev);
1374 struct net_device_context *ndc = netdev_priv(net);
1375 struct netvsc_device *net_device;
1376 struct rndis_device *rndis_device;
1377 struct ndis_recv_scale_cap rsscap;
1378 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1379 u32 mtu, size;
1380 u32 num_possible_rss_qs;
1381 int i, ret;
1382
1383 rndis_device = get_rndis_device();
1384 if (!rndis_device)
1385 return ERR_PTR(-ENODEV);
1386
1387 /* Let the inner driver handle this first to create the netvsc channel
1388 * NOTE! Once the channel is created, we may get a receive callback
1389 * (RndisFilterOnReceive()) before this call is completed
1390 */
1391 net_device = netvsc_device_add(dev, device_info);
1392 if (IS_ERR(net_device)) {
1393 kfree(rndis_device);
1394 return net_device;
1395 }
1396
1397 /* Initialize the rndis device */
1398 net_device->max_chn = 1;
1399 net_device->num_chn = 1;
1400
1401 net_device->extension = rndis_device;
1402 rndis_device->ndev = net;
1403
1404 /* Send the rndis initialization message */
1405 ret = rndis_filter_init_device(rndis_device, net_device);
1406 if (ret != 0)
1407 goto err_dev_remv;
1408
1409 /* Get the MTU from the host */
1410 size = sizeof(u32);
1411 ret = rndis_filter_query_device(rndis_device, net_device,
1412 RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1413 &mtu, &size);
1414 if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1415 net->mtu = mtu;
1416
1417 /* Get the mac address */
1418 ret = rndis_filter_query_device_mac(rndis_device, net_device);
1419 if (ret != 0)
1420 goto err_dev_remv;
1421
1422 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1423
1424 /* Get friendly name as ifalias */
1425 if (!net->ifalias)
1426 rndis_get_friendly_name(net, rndis_device, net_device);
1427
1428 /* Query and set hardware capabilities */
1429 ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
1430 if (ret != 0)
1431 goto err_dev_remv;
1432
1433 rndis_filter_query_device_link_status(rndis_device, net_device);
1434
1435 netdev_dbg(net, "Device MAC %pM link state %s\n",
1436 rndis_device->hw_mac_adr,
1437 rndis_device->link_state ? "down" : "up");
1438
1439 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1440 goto out;
1441
1442 rndis_filter_query_link_speed(rndis_device, net_device);
1443
1444 /* vRSS setup */
1445 memset(&rsscap, 0, rsscap_size);
1446 ret = rndis_filter_query_device(rndis_device, net_device,
1447 OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1448 &rsscap, &rsscap_size);
1449 if (ret || rsscap.num_recv_que < 2)
1450 goto out;
1451
1452 /* This guarantees that num_possible_rss_qs <= num_online_cpus */
1453 num_possible_rss_qs = min_t(u32, num_online_cpus(),
1454 rsscap.num_recv_que);
1455
1456 net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1457
1458 /* We will use the given number of channels if available. */
1459 net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1460
1461 if (!netif_is_rxfh_configured(net)) {
1462 for (i = 0; i < ITAB_NUM; i++)
1463 ndc->rx_table[i] = ethtool_rxfh_indir_default(
1464 i, net_device->num_chn);
1465 }
1466
1467 atomic_set(&net_device->open_chn, 1);
1468 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1469
1470 for (i = 1; i < net_device->num_chn; i++) {
1471 ret = netvsc_alloc_recv_comp_ring(net_device, i);
1472 if (ret) {
1473 while (--i != 0)
1474 vfree(net_device->chan_table[i].mrc.slots);
1475 goto out;
1476 }
1477 }
1478
1479 for (i = 1; i < net_device->num_chn; i++)
1480 netif_napi_add(net, &net_device->chan_table[i].napi,
1481 netvsc_poll, NAPI_POLL_WEIGHT);
1482
1483 return net_device;
1484
1485 out:
1486 /* setting up multiple channels failed */
1487 net_device->max_chn = 1;
1488 net_device->num_chn = 1;
1489 return net_device;
1490
1491 err_dev_remv:
1492 rndis_filter_device_remove(dev, net_device);
1493 return ERR_PTR(ret);
1494 }
1495
1496 void rndis_filter_device_remove(struct hv_device *dev,
1497 struct netvsc_device *net_dev)
1498 {
1499 struct rndis_device *rndis_dev = net_dev->extension;
1500
1501 /* Halt and release the rndis device */
1502 rndis_filter_halt_device(net_dev, rndis_dev);
1503
1504 netvsc_device_remove(dev);
1505 }
1506
1507 int rndis_filter_open(struct netvsc_device *nvdev)
1508 {
1509 if (!nvdev)
1510 return -EINVAL;
1511
1512 return rndis_filter_open_device(nvdev->extension);
1513 }
1514
1515 int rndis_filter_close(struct netvsc_device *nvdev)
1516 {
1517 if (!nvdev)
1518 return -EINVAL;
1519
1520 return rndis_filter_close_device(nvdev->extension);
1521 }
1522