/** @file
 * @brief DNS resolve API
 *
 * An API for applications to perform DNS queries.
 */

/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_dns_resolve, CONFIG_DNS_RESOLVER_LOG_LEVEL);

#include <zephyr/types.h>
#include <zephyr/random/random.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>

#include <zephyr/sys/crc.h>
#include <zephyr/net/net_ip.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/dns_resolve.h>
#include "dns_pack.h"
#include "dns_internal.h"

#define DNS_SERVER_COUNT CONFIG_DNS_RESOLVER_MAX_SERVERS
#define SERVER_COUNT     (DNS_SERVER_COUNT + DNS_MAX_MCAST_SERVERS)

#define MDNS_IPV4_ADDR "224.0.0.251:5353"
#define MDNS_IPV6_ADDR "[ff02::fb]:5353"

#define LLMNR_IPV4_ADDR "224.0.0.252:5355"
#define LLMNR_IPV6_ADDR "[ff02::1:3]:5355"

#define DNS_BUF_TIMEOUT K_MSEC(500) /* ms */

/* RFC 1035, 3.1. Name space definitions
 * To simplify implementations, the total length of a domain name (i.e.,
 * label octets and label length octets) is restricted to 255 octets or
 * less.
 */
#define DNS_MAX_NAME_LEN 255

#define DNS_QUERY_MAX_SIZE (DNS_MSG_HEADER_SIZE + DNS_MAX_NAME_LEN + \
			    DNS_QTYPE_LEN + DNS_QCLASS_LEN)

/* This value is recommended by RFC 1035 */
#define DNS_RESOLVER_MAX_BUF_SIZE 512
#define DNS_RESOLVER_MIN_BUF 1
#define DNS_RESOLVER_BUF_CTR (DNS_RESOLVER_MIN_BUF + \
			      CONFIG_DNS_RESOLVER_ADDITIONAL_BUF_CTR)

/* Compressed RR uses a pointer to another RR. So, min size is 12 bytes without
 * considering RR payload.
 * See https://tools.ietf.org/html/rfc1035#section-4.1.4
 */
#define DNS_ANSWER_PTR_LEN 12

/* See dns_unpack_answer, and also see:
 * https://tools.ietf.org/html/rfc1035#section-4.1.2
 */
#define DNS_QUERY_POS 0x0c

#define DNS_IPV4_LEN sizeof(struct in_addr)
#define DNS_IPV6_LEN sizeof(struct in6_addr)

NET_BUF_POOL_DEFINE(dns_msg_pool, DNS_RESOLVER_BUF_CTR,
		    DNS_RESOLVER_MAX_BUF_SIZE, 0, NULL);

NET_BUF_POOL_DEFINE(dns_qname_pool, DNS_RESOLVER_BUF_CTR, DNS_MAX_NAME_LEN,
		    0, NULL);

static struct dns_resolve_context dns_default_ctx;

/* Must be invoked with context lock held */
static int dns_write(struct dns_resolve_context *ctx,
		     int server_idx,
		     int query_idx,
		     struct net_buf *dns_data,
		     struct net_buf *dns_qname,
		     int hop_limit);

static bool server_is_mdns(sa_family_t family, struct sockaddr *addr)
{
	if (family == AF_INET) {
		if (net_ipv4_is_addr_mcast(&net_sin(addr)->sin_addr) &&
		    net_sin(addr)->sin_addr.s4_addr[3] == 251U) {
			return true;
		}

		return false;
	}

	if (family == AF_INET6) {
		if (net_ipv6_is_addr_mcast(&net_sin6(addr)->sin6_addr) &&
		    net_sin6(addr)->sin6_addr.s6_addr[15] == 0xfb) {
			return true;
		}

		return false;
	}

	return false;
}

static bool server_is_llmnr(sa_family_t family, struct sockaddr *addr)
{
	if (family == AF_INET) {
		if (net_ipv4_is_addr_mcast(&net_sin(addr)->sin_addr) &&
		    net_sin(addr)->sin_addr.s4_addr[3] == 252U) {
			return true;
		}

		return false;
	}

	if (family == AF_INET6) {
		if (net_ipv6_is_addr_mcast(&net_sin6(addr)->sin6_addr) &&
		    net_sin6(addr)->sin6_addr.s6_addr[15] == 0x03) {
			return true;
		}

		return false;
	}

	return false;
}

static void dns_postprocess_server(struct dns_resolve_context *ctx, int idx)
{
	struct sockaddr *addr = &ctx->servers[idx].dns_server;

	if (addr->sa_family == AF_INET) {
		ctx->servers[idx].is_mdns = server_is_mdns(AF_INET, addr);
		if (!ctx->servers[idx].is_mdns) {
			ctx->servers[idx].is_llmnr =
				server_is_llmnr(AF_INET, addr);
		}

		if (net_sin(addr)->sin_port == 0U) {
			if (IS_ENABLED(CONFIG_MDNS_RESOLVER) &&
			    ctx->servers[idx].is_mdns) {
				/* We only use 5353 as the default port
				 * if mDNS support is enabled. The user can
				 * override this by defining the port in the
				 * config file.
				 */
				net_sin(addr)->sin_port = htons(5353);
			} else if (IS_ENABLED(CONFIG_LLMNR_RESOLVER) &&
				   ctx->servers[idx].is_llmnr) {
				/* We only use 5355 as the default port
				 * if LLMNR support is enabled. The user can
				 * override this by defining the port in the
				 * config file.
				 */
				net_sin(addr)->sin_port = htons(5355);
			} else {
				net_sin(addr)->sin_port = htons(53);
			}
		}
	} else {
		ctx->servers[idx].is_mdns = server_is_mdns(AF_INET6, addr);
		if (!ctx->servers[idx].is_mdns) {
			ctx->servers[idx].is_llmnr =
				server_is_llmnr(AF_INET6, addr);
		}

		if (net_sin6(addr)->sin6_port == 0U) {
			if (IS_ENABLED(CONFIG_MDNS_RESOLVER) &&
			    ctx->servers[idx].is_mdns) {
				net_sin6(addr)->sin6_port = htons(5353);
			} else if (IS_ENABLED(CONFIG_LLMNR_RESOLVER) &&
				   ctx->servers[idx].is_llmnr) {
				net_sin6(addr)->sin6_port = htons(5355);
			} else {
				net_sin6(addr)->sin6_port = htons(53);
			}
		}
	}
}
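
/* Illustrative sketch (not part of the original sources): examples of how
 * dns_postprocess_server() above classifies an already parsed server address
 * and fills in a default port. The addresses are documentation examples, not
 * defaults shipped by the resolver.
 *
 *	"192.0.2.53"        -> unicast DNS, port defaults to 53
 *	"192.0.2.53:5300"   -> unicast DNS, explicit port 5300 is kept
 *	"224.0.0.251:5353"  -> detected as mDNS (is_mdns == true)
 *	"[ff02::1:3]:5355"  -> detected as LLMNR (is_llmnr == true)
 */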

/* Must be invoked with context lock held */
static int dns_resolve_init_locked(struct dns_resolve_context *ctx,
				   const char *servers[],
				   const struct sockaddr *servers_sa[])
{
#if defined(CONFIG_NET_IPV6)
	struct sockaddr_in6 local_addr6 = {
		.sin6_family = AF_INET6,
		.sin6_port = 0,
	};
#endif
#if defined(CONFIG_NET_IPV4)
	struct sockaddr_in local_addr4 = {
		.sin_family = AF_INET,
		.sin_port = 0,
	};
#endif
	struct sockaddr *local_addr = NULL;
	socklen_t addr_len = 0;
	int i = 0, idx = 0;
	struct net_if *iface;
	int ret, count;

	if (!ctx) {
		return -ENOENT;
	}

	if (ctx->state != DNS_RESOLVE_CONTEXT_INACTIVE) {
		ret = -ENOTEMPTY;
		goto fail;
	}

	if (servers) {
		for (i = 0; idx < SERVER_COUNT && servers[i]; i++) {
			struct sockaddr *addr = &ctx->servers[idx].dns_server;

			(void)memset(addr, 0, sizeof(*addr));

			ret = net_ipaddr_parse(servers[i], strlen(servers[i]),
					       addr);
			if (!ret) {
				continue;
			}

			dns_postprocess_server(ctx, idx);

			NET_DBG("[%d] %s%s%s", i, servers[i],
				IS_ENABLED(CONFIG_MDNS_RESOLVER) ?
				(ctx->servers[idx].is_mdns ? " mDNS" : "") : "",
				IS_ENABLED(CONFIG_LLMNR_RESOLVER) ?
				(ctx->servers[idx].is_llmnr ?
							" LLMNR" : "") : "");
			idx++;
		}
	}

	if (servers_sa) {
		for (i = 0; idx < SERVER_COUNT && servers_sa[i]; i++) {
			memcpy(&ctx->servers[idx].dns_server, servers_sa[i],
			       sizeof(ctx->servers[idx].dns_server));
			dns_postprocess_server(ctx, idx);
			idx++;
		}
	}

	for (i = 0, count = 0;
	     i < SERVER_COUNT && ctx->servers[i].dns_server.sa_family; i++) {

		if (ctx->servers[i].dns_server.sa_family == AF_INET6) {
#if defined(CONFIG_NET_IPV6)
			local_addr = (struct sockaddr *)&local_addr6;
			addr_len = sizeof(struct sockaddr_in6);

			if (IS_ENABLED(CONFIG_MDNS_RESOLVER) &&
			    ctx->servers[i].is_mdns) {
				local_addr6.sin6_port = htons(5353);
			}
#else
			continue;
#endif
		}

		if (ctx->servers[i].dns_server.sa_family == AF_INET) {
#if defined(CONFIG_NET_IPV4)
			local_addr = (struct sockaddr *)&local_addr4;
			addr_len = sizeof(struct sockaddr_in);

			if (IS_ENABLED(CONFIG_MDNS_RESOLVER) &&
			    ctx->servers[i].is_mdns) {
				local_addr4.sin_port = htons(5353);
			}
#else
			continue;
#endif
		}

		if (!local_addr) {
			NET_DBG("Local address not set");
			ret = -EAFNOSUPPORT;
			goto fail;
		}

		ret = net_context_get(ctx->servers[i].dns_server.sa_family,
				      SOCK_DGRAM, IPPROTO_UDP,
				      &ctx->servers[i].net_ctx);
		if (ret < 0) {
			NET_DBG("Cannot get net_context (%d)", ret);
			goto fail;
		}

		ret = net_context_bind(ctx->servers[i].net_ctx,
				       local_addr, addr_len);
		if (ret < 0) {
			NET_DBG("Cannot bind DNS context (%d)", ret);
			goto fail;
		}

		iface = net_context_get_iface(ctx->servers[i].net_ctx);

		if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
			net_mgmt_event_notify_with_info(
				NET_EVENT_DNS_SERVER_ADD,
				iface, (void *)&ctx->servers[i].dns_server,
				sizeof(struct sockaddr));
		} else {
			net_mgmt_event_notify(NET_EVENT_DNS_SERVER_ADD, iface);
		}

#if defined(CONFIG_NET_IPV6)
		local_addr6.sin6_port = 0;
#endif

#if defined(CONFIG_NET_IPV4)
		local_addr4.sin_port = 0;
#endif

		count++;
	}

	if (count == 0) {
		/* No servers defined */
		NET_DBG("No DNS servers defined.");
		ret = -EINVAL;
		goto fail;
	}

	ctx->state = DNS_RESOLVE_CONTEXT_ACTIVE;
	ctx->buf_timeout = DNS_BUF_TIMEOUT;
	ret = 0;

fail:
	return ret;
}

int dns_resolve_init(struct dns_resolve_context *ctx, const char *servers[],
		     const struct sockaddr *servers_sa[])
{
	if (!ctx) {
		return -ENOENT;
	}

	(void)memset(ctx, 0, sizeof(*ctx));

	(void)k_mutex_init(&ctx->lock);
	ctx->state = DNS_RESOLVE_CONTEXT_INACTIVE;

	/* As this function is called only once during system init, there is
	 * no reason to acquire the lock.
	 */
	return dns_resolve_init_locked(ctx, servers, servers_sa);
}

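/* Illustrative sketch (not part of the original sources): a typical way an
 * application could set up its own resolver context with dns_resolve_init().
 * The context name, server addresses and error handling below are examples
 * only; the server list must be NULL terminated.
 *
 *	static struct dns_resolve_context my_dns_ctx;
 *	static const char *my_servers[] = {
 *		"192.0.2.53",         // plain DNS, port defaults to 53
 *		"[2001:db8::53]:53",  // IPv6 server with explicit port
 *		NULL
 *	};
 *
 *	int err = dns_resolve_init(&my_dns_ctx, my_servers, NULL);
 *	if (err < 0) {
 *		// initialization failed, e.g. no valid servers configured
 *	}
 */
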
/* Check whether a slot is available for use, or optionally whether it can be
 * reclaimed.
 *
 * @param pending_query the query slot in question
 *
 * @param reclaim_if_available if the slot is marked in use, but the query has
 * been completed and the work item is no longer pending, complete the release
 * of the slot.
 *
 * @return true if and only if the slot can be used for a new query.
 */
static inline bool check_query_active(struct dns_pending_query *pending_query,
				      bool reclaim_if_available)
{
	bool ret = false;

	if (pending_query->cb != NULL) {
		ret = true;
		if (reclaim_if_available
		    && pending_query->query == NULL
		    && k_work_delayable_busy_get(&pending_query->timer) == 0) {
			pending_query->cb = NULL;
			ret = false;
		}
	}

	return ret;
}

/* Must be invoked with context lock held */
static inline int get_cb_slot(struct dns_resolve_context *ctx)
{
	int i;

	for (i = 0; i < CONFIG_DNS_NUM_CONCUR_QUERIES; i++) {
		if (!check_query_active(&ctx->queries[i], true)) {
			return i;
		}
	}

	return -ENOENT;
}

/* Invoke the callback associated with a query slot, if still relevant.
 *
 * Must be invoked with context lock held.
 *
 * @param status the query status value
 * @param info the query result structure
 * @param pending_query the query slot that will provide the callback
 **/
static inline void invoke_query_callback(int status,
					 struct dns_addrinfo *info,
					 struct dns_pending_query *pending_query)
{
	/* Only notify if the slot is neither released nor in the process of
	 * being released.
	 */
	if (pending_query->query != NULL && pending_query->cb != NULL) {
		pending_query->cb(status, info, pending_query->user_data);
	}
}

/* Release a query slot reserved by get_cb_slot().
 *
 * Must be invoked with context lock held.
 *
 * @param pending_query the query slot to be released
 */
static void release_query(struct dns_pending_query *pending_query)
{
	int busy = k_work_cancel_delayable(&pending_query->timer);

	/* If the work item is no longer pending we're done. */
	if (busy == 0) {
		/* All done. */
		pending_query->cb = NULL;
	} else {
		/* Work item is still pending. Set a secondary condition that
		 * can be checked by get_cb_slot() to complete release of the
		 * slot once the work item has been confirmed to be completed.
		 */
		pending_query->query = NULL;
	}
}

/* Must be invoked with context lock held */
static inline int get_slot_by_id(struct dns_resolve_context *ctx,
				 uint16_t dns_id,
				 uint16_t query_hash)
{
	int i;

	for (i = 0; i < CONFIG_DNS_NUM_CONCUR_QUERIES; i++) {
		if (check_query_active(&ctx->queries[i], false) &&
		    ctx->queries[i].id == dns_id &&
		    (query_hash == 0 ||
		     ctx->queries[i].query_hash == query_hash)) {
			return i;
		}
	}

	return -ENOENT;
}

/* Unit test needs to be able to call this function */
#if !defined(CONFIG_NET_TEST)
static
#endif
int dns_validate_msg(struct dns_resolve_context *ctx,
		     struct dns_msg_t *dns_msg,
		     uint16_t *dns_id,
		     int *query_idx,
		     struct net_buf *dns_cname,
		     uint16_t *query_hash)
{
	struct dns_addrinfo info = { 0 };
	uint32_t ttl; /* RR ttl, so far it is not passed to caller */
	uint8_t *src, *addr;
	const char *query_name;
	int address_size;
	/* index that points to the current answer being analyzed */
	int answer_ptr;
	int items;
	int server_idx;
	int ret = 0;

	/* Make sure that we can read DNS id, flags and rcode */
	if (dns_msg->msg_size < (sizeof(*dns_id) + sizeof(uint16_t))) {
		ret = DNS_EAI_FAIL;
		goto quit;
	}

	/* The dns_unpack_response_header() has a design flaw as it expects
	 * the DNS id to be given instead of returning the id to the caller.
	 * In our case we would like to get it returned instead so that we
	 * can match the DNS query that we sent. When dns_read() is called,
	 * we do not know what the DNS id is yet.
	 */
	*dns_id = dns_unpack_header_id(dns_msg->msg);

	if (dns_header_rcode(dns_msg->msg) == DNS_HEADER_REFUSED) {
		ret = DNS_EAI_FAIL;
		goto quit;
	}

	/* We might receive a query while we are waiting for a response; in
	 * that case we just ignore the query instead of failing the
	 * resolution.
	 */
	if (dns_header_qr(dns_msg->msg) == DNS_QUERY) {
		ret = 0;
		goto quit;
	}

	ret = dns_unpack_response_header(dns_msg, *dns_id);
	if (ret < 0) {
		ret = DNS_EAI_FAIL;
		goto quit;
	}

	if (dns_header_qdcount(dns_msg->msg) != 1) {
		/* For mDNS (when dns_id == 0) the query count is 0 */
		if (*dns_id > 0) {
			ret = DNS_EAI_FAIL;
			goto quit;
		}
	}

	ret = dns_unpack_response_query(dns_msg);
	if (ret < 0) {
		/* Check mDNS like above */
		if (*dns_id > 0) {
			ret = DNS_EAI_FAIL;
			goto quit;
		}

		/* mDNS responses do not have the query part so the
		 * answer starts immediately after the header.
		 */
		dns_msg->answer_offset = dns_msg->query_offset;
	}

	/* Because in mDNS the DNS id is set to 0 and must be ignored
	 * on reply, we need to figure out the answer in order to find
	 * the proper query. To simplify things, the normal DNS responses
	 * are handled the same way.
	 */

	answer_ptr = DNS_QUERY_POS;
	items = 0;
	server_idx = 0;
	enum dns_rr_type answer_type = DNS_RR_TYPE_INVALID;

	while (server_idx < dns_header_ancount(dns_msg->msg)) {
		ret = dns_unpack_answer(dns_msg, answer_ptr, &ttl,
					&answer_type);
		if (ret < 0) {
			ret = DNS_EAI_FAIL;
			goto quit;
		}

		switch (dns_msg->response_type) {
		case DNS_RESPONSE_IP:
			if (*query_idx >= 0) {
				goto query_known;
			}

			query_name = dns_msg->msg + dns_msg->query_offset;

			/* Add \0 and query type (A or AAAA) to the hash */
			*query_hash = crc16_ansi(query_name,
						 strlen(query_name) + 1 + 2);

			*query_idx = get_slot_by_id(ctx, *dns_id, *query_hash);
			if (*query_idx < 0) {
				ret = DNS_EAI_SYSTEM;
				goto quit;
			}

query_known:
			if (ctx->queries[*query_idx].query_type ==
							DNS_QUERY_TYPE_A) {
				if (answer_type != DNS_RR_TYPE_A) {
					ret = DNS_EAI_ADDRFAMILY;
					goto quit;
				}

				address_size = DNS_IPV4_LEN;
				addr = (uint8_t *)&net_sin(&info.ai_addr)->
								sin_addr;
				info.ai_family = AF_INET;
				info.ai_addr.sa_family = AF_INET;
				info.ai_addrlen = sizeof(struct sockaddr_in);

			} else if (ctx->queries[*query_idx].query_type ==
							DNS_QUERY_TYPE_AAAA) {
				if (answer_type != DNS_RR_TYPE_AAAA) {
					ret = DNS_EAI_ADDRFAMILY;
					goto quit;
				}

				/* We cannot resolve an IPv6 address if IPv6
				 * is disabled, because "struct sockaddr" does
				 * not have enough space for an IPv6 address
				 * in that case.
				 */
#if defined(CONFIG_NET_IPV6)
				address_size = DNS_IPV6_LEN;
				addr = (uint8_t *)&net_sin6(&info.ai_addr)->
								sin6_addr;
				info.ai_family = AF_INET6;
				info.ai_addr.sa_family = AF_INET6;
				info.ai_addrlen = sizeof(struct sockaddr_in6);
#else
				ret = DNS_EAI_FAMILY;
				goto quit;
#endif
			} else {
				ret = DNS_EAI_FAMILY;
				goto quit;
			}

			if (dns_msg->response_length < address_size) {
				/* it seems this is a malformed message */
				ret = DNS_EAI_FAIL;
				goto quit;
			}

			if ((dns_msg->response_position + address_size) >
			    dns_msg->msg_size) {
				/* Too short message */
				ret = DNS_EAI_FAIL;
				goto quit;
			}

			src = dns_msg->msg + dns_msg->response_position;
			memcpy(addr, src, address_size);

			invoke_query_callback(DNS_EAI_INPROGRESS, &info,
					      &ctx->queries[*query_idx]);
			items++;
			break;

		case DNS_RESPONSE_CNAME_NO_IP:
			/* Instead of using the QNAME at DNS_QUERY_POS,
			 * we will use this CNAME
			 */
			answer_ptr = dns_msg->response_position;
			break;

		default:
			ret = DNS_EAI_FAIL;
			goto quit;
		}

		/* Update the answer offset to point to the next RR (answer) */
		dns_msg->answer_offset += dns_msg->response_position -
							dns_msg->answer_offset;
		dns_msg->answer_offset += dns_msg->response_length;

		server_idx++;
	}

	if (*query_idx < 0) {
		/* If the query_idx is still unknown, try to get it here
		 * and hope it is found.
		 */
		query_name = dns_msg->msg + dns_msg->query_offset;
		*query_hash = crc16_ansi(query_name,
					 strlen(query_name) + 1 + 2);

		*query_idx = get_slot_by_id(ctx, *dns_id, *query_hash);
		if (*query_idx < 0) {
			ret = DNS_EAI_SYSTEM;
			goto quit;
		}
	}

	/* No IP addresses were found, so we take the last CNAME to generate
	 * another query. The number of additional queries is controlled via
	 * Kconfig.
	 */
	if (items == 0) {
		if (dns_msg->response_type == DNS_RESPONSE_CNAME_NO_IP) {
			uint16_t pos = dns_msg->response_position;

			/* The dns_cname should always be set. As a special
			 * case, it might not be set for unit tests that call
			 * this function directly.
			 */
			if (dns_cname) {
				ret = dns_copy_qname(dns_cname->data,
						     &dns_cname->len,
						     dns_cname->size,
						     dns_msg, pos);
				if (ret < 0) {
					ret = DNS_EAI_SYSTEM;
					goto quit;
				}
			}

			ret = DNS_EAI_AGAIN;
			goto quit;
		}
	}

	if (items == 0) {
		ret = DNS_EAI_NODATA;
	} else {
		ret = DNS_EAI_ALLDONE;
	}

quit:
	return ret;
}

/* Must be invoked with context lock held */
static int dns_read(struct dns_resolve_context *ctx,
		    struct net_pkt *pkt,
		    struct net_buf *dns_data,
		    uint16_t *dns_id,
		    struct net_buf *dns_cname,
		    uint16_t *query_hash)
{
	/* Helper struct to track the dns msg received from the server */
	struct dns_msg_t dns_msg;
	int data_len;
	int ret;
	int query_idx = -1;

	data_len = MIN(net_pkt_remaining_data(pkt), DNS_RESOLVER_MAX_BUF_SIZE);

	/* TODO: Instead of this temporary copy, just use the net_pkt directly.
	 */
	ret = net_pkt_read(pkt, dns_data->data, data_len);
	if (ret < 0) {
		ret = DNS_EAI_MEMORY;
		goto quit;
	}

	dns_msg.msg = dns_data->data;
	dns_msg.msg_size = data_len;

	ret = dns_validate_msg(ctx, &dns_msg, dns_id, &query_idx,
			       dns_cname, query_hash);
	if (ret == DNS_EAI_AGAIN) {
		goto finished;
	}

	if (ret < 0 || query_idx < 0 ||
	    query_idx >= CONFIG_DNS_NUM_CONCUR_QUERIES) {
		goto quit;
	}

	invoke_query_callback(ret, NULL, &ctx->queries[query_idx]);

	/* Marks the end of the results */
	release_query(&ctx->queries[query_idx]);

	net_pkt_unref(pkt);

	return 0;

finished:
	dns_resolve_cancel_with_name(ctx, *dns_id,
				     ctx->queries[query_idx].query,
				     ctx->queries[query_idx].query_type);
quit:
	net_pkt_unref(pkt);

	return ret;
}

static void cb_recv(struct net_context *net_ctx,
		    struct net_pkt *pkt,
		    union net_ip_header *ip_hdr,
		    union net_proto_header *proto_hdr,
		    int status,
		    void *user_data)
{
	struct dns_resolve_context *ctx = user_data;
	struct net_buf *dns_cname = NULL;
	struct net_buf *dns_data = NULL;
	uint16_t query_hash = 0U;
	uint16_t dns_id = 0U;
	int ret, i;

	ARG_UNUSED(net_ctx);

	k_mutex_lock(&ctx->lock, K_FOREVER);

	if (ctx->state != DNS_RESOLVE_CONTEXT_ACTIVE) {
		goto unlock;
	}

	if (status) {
		ret = DNS_EAI_SYSTEM;
		goto quit;
	}

	dns_data = net_buf_alloc(&dns_msg_pool, ctx->buf_timeout);
	if (!dns_data) {
		ret = DNS_EAI_MEMORY;
		goto quit;
	}

	dns_cname = net_buf_alloc(&dns_qname_pool, ctx->buf_timeout);
	if (!dns_cname) {
		ret = DNS_EAI_MEMORY;
		goto quit;
	}

	ret = dns_read(ctx, pkt, dns_data, &dns_id, dns_cname, &query_hash);
	if (!ret) {
		/* We called the callback already in dns_read() if there
		 * were no errors.
		 */
		goto free_buf;
	}

	/* Query again if we got CNAME */
	if (ret == DNS_EAI_AGAIN) {
		int failure = 0;
		int j;

		i = get_slot_by_id(ctx, dns_id, query_hash);
		if (i < 0) {
			goto free_buf;
		}

		for (j = 0; j < SERVER_COUNT; j++) {
			if (!ctx->servers[j].net_ctx) {
				continue;
			}

			ret = dns_write(ctx, j, i, dns_data, dns_cname, 0);
			if (ret < 0) {
				failure++;
			}
		}

		if (failure) {
			NET_DBG("DNS cname query failed %d times", failure);

			if (failure == j) {
				ret = DNS_EAI_SYSTEM;
				goto quit;
			}
		}

		goto free_buf;
	}

quit:
	i = get_slot_by_id(ctx, dns_id, query_hash);
	if (i < 0) {
		goto free_buf;
	}

	invoke_query_callback(ret, NULL, &ctx->queries[i]);

	/* Marks the end of the results */
	release_query(&ctx->queries[i]);

free_buf:
	if (dns_data) {
		net_buf_unref(dns_data);
	}

	if (dns_cname) {
		net_buf_unref(dns_cname);
	}

unlock:
	k_mutex_unlock(&ctx->lock);
}

/* Must be invoked with context lock held */
static int dns_write(struct dns_resolve_context *ctx,
		     int server_idx,
		     int query_idx,
		     struct net_buf *dns_data,
		     struct net_buf *dns_qname,
		     int hop_limit)
{
	enum dns_query_type query_type;
	struct net_context *net_ctx;
	struct sockaddr *server;
	int server_addr_len;
	uint16_t dns_id;
	int ret;

	net_ctx = ctx->servers[server_idx].net_ctx;
	server = &ctx->servers[server_idx].dns_server;
	dns_id = ctx->queries[query_idx].id;
	query_type = ctx->queries[query_idx].query_type;

	ret = dns_msg_pack_query(dns_data->data, &dns_data->len, dns_data->size,
				 dns_qname->data, dns_qname->len, dns_id,
				 (enum dns_rr_type)query_type);
	if (ret < 0) {
		return -EINVAL;
	}

	/* Add \0 and query type (A or AAAA) to the hash. Note that
	 * dns_qname->len already includes the terminating \0.
	 */
	ctx->queries[query_idx].query_hash =
		crc16_ansi(dns_data->data + DNS_MSG_HEADER_SIZE,
			   dns_qname->len + 2);

	if (IS_ENABLED(CONFIG_NET_IPV6) &&
	    net_context_get_family(net_ctx) == AF_INET6 &&
	    hop_limit > 0) {
		net_context_set_ipv6_hop_limit(net_ctx, hop_limit);
	} else if (IS_ENABLED(CONFIG_NET_IPV4) &&
		   net_context_get_family(net_ctx) == AF_INET &&
		   hop_limit > 0) {
		net_context_set_ipv4_ttl(net_ctx, hop_limit);
	}

	ret = net_context_recv(net_ctx, cb_recv, K_NO_WAIT, ctx);
	if (ret < 0 && ret != -EALREADY) {
		NET_DBG("Could not receive from socket (%d)", ret);
		return ret;
	}

	if (server->sa_family == AF_INET) {
		server_addr_len = sizeof(struct sockaddr_in);
	} else {
		server_addr_len = sizeof(struct sockaddr_in6);
	}

	ret = k_work_reschedule(&ctx->queries[query_idx].timer,
				ctx->queries[query_idx].timeout);
	if (ret < 0) {
		NET_DBG("[%u] cannot submit work to server idx %d for id %u "
			"ret %d", query_idx, server_idx, dns_id, ret);
		return ret;
	}

	NET_DBG("[%u] submitting work to server idx %d for id %u "
		"hash %u", query_idx, server_idx, dns_id,
		ctx->queries[query_idx].query_hash);

	ret = net_context_sendto(net_ctx, dns_data->data, dns_data->len,
				 server, server_addr_len, NULL,
				 K_NO_WAIT, NULL);
	if (ret < 0) {
		NET_DBG("Cannot send query (%d)", ret);
		return ret;
	}

	return 0;
}

/* Must be invoked with context lock held */
static void dns_resolve_cancel_slot(struct dns_resolve_context *ctx, int slot)
{
	invoke_query_callback(DNS_EAI_CANCELED, NULL, &ctx->queries[slot]);

	release_query(&ctx->queries[slot]);
}

/* Must be invoked with context lock held */
static void dns_resolve_cancel_all(struct dns_resolve_context *ctx)
{
	int i;

	for (i = 0; i < CONFIG_DNS_NUM_CONCUR_QUERIES; i++) {
		if (ctx->queries[i].cb && ctx->queries[i].query) {
			dns_resolve_cancel_slot(ctx, i);
		}
	}
}

static int dns_resolve_cancel_with_hash(struct dns_resolve_context *ctx,
					uint16_t dns_id,
					uint16_t query_hash,
					const char *query_name)
{
	int ret = 0;
	int i;

	k_mutex_lock(&ctx->lock, K_FOREVER);

	if (ctx->state == DNS_RESOLVE_CONTEXT_DEACTIVATING) {
		/*
		 * Cancel is part of context "deactivating" process, so no need
		 * to do anything more.
		 */
		goto unlock;
	}

	i = get_slot_by_id(ctx, dns_id, query_hash);
	if (i < 0) {
		ret = -ENOENT;
		goto unlock;
	}

	NET_DBG("Cancelling DNS req %u (name %s type %d hash %u)", dns_id,
		query_name, ctx->queries[i].query_type,
		query_hash);

	dns_resolve_cancel_slot(ctx, i);

unlock:
	k_mutex_unlock(&ctx->lock);

	return ret;
}

int dns_resolve_cancel_with_name(struct dns_resolve_context *ctx,
				 uint16_t dns_id,
				 const char *query_name,
				 enum dns_query_type query_type)
{
	uint16_t query_hash = 0;

	if (query_name) {
		struct net_buf *buf;
		uint16_t len;
		int ret;

		/* Use net_buf as a temporary buffer to store the packed
		 * DNS name.
		 */
		buf = net_buf_alloc(&dns_msg_pool, ctx->buf_timeout);
		if (!buf) {
			return -ENOMEM;
		}

		ret = dns_msg_pack_qname(&len, buf->data, buf->size,
					 query_name);
		if (ret >= 0) {
			/* If the query string + \0 + query type (A or AAAA)
			 * does not fit the tmp buf, then bail out
			 */
			if ((len + 2) > buf->size) {
				net_buf_unref(buf);
				return -ENOMEM;
			}

			net_buf_add(buf, len);
			net_buf_add_be16(buf, query_type);

			query_hash = crc16_ansi(buf->data, len + 2);
		}

		net_buf_unref(buf);

		if (ret < 0) {
			return ret;
		}
	}

	return dns_resolve_cancel_with_hash(ctx, dns_id, query_hash,
					    query_name);
}

int dns_resolve_cancel(struct dns_resolve_context *ctx, uint16_t dns_id)
{
	return dns_resolve_cancel_with_name(ctx, dns_id, NULL, 0);
}

static void query_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct dns_pending_query *pending_query =
		CONTAINER_OF(dwork, struct dns_pending_query, timer);
	int ret;

	/* We have to take the lock as we're inspecting protected content
	 * associated with the query. But don't block the system work queue:
	 * if the lock can't be taken immediately, reschedule the work item to
	 * be run again after everything else has had a chance.
	 *
	 * Note that it's OK to use the k_work API on the delayable work
	 * without holding the lock: it's only the associated state in the
	 * containing structure that must be protected.
	 */
	ret = k_mutex_lock(&pending_query->ctx->lock, K_NO_WAIT);
	if (ret != 0) {
		/*
		 * Reschedule query timeout handler with some delay, so that all
		 * threads (including those with lower priorities) have a chance
		 * to move forward and release DNS context lock.
		 *
		 * Timeout value was arbitrarily chosen and can be updated in
		 * future if needed.
		 */
		k_work_reschedule(dwork, K_MSEC(10));
		return;
	}

	NET_DBG("Query timeout DNS req %u type %d hash %u", pending_query->id,
		pending_query->query_type, pending_query->query_hash);

	/* The resolve cancel will invoke release_query(), but release will
	 * not be completed because the work item is still pending. Instead
	 * the release will be completed when check_query_active() confirms
	 * the work item is no longer active.
	 */
	(void)dns_resolve_cancel_with_hash(pending_query->ctx,
					   pending_query->id,
					   pending_query->query_hash,
					   pending_query->query);

	k_mutex_unlock(&pending_query->ctx->lock);
}

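/* Illustrative sketch (not part of the original sources): how an application
 * might use dns_resolve_name() below with the default context. The callback
 * name, hostname and timeout value are examples only.
 *
 *	static void dns_result_cb(enum dns_resolve_status status,
 *				  struct dns_addrinfo *info,
 *				  void *user_data)
 *	{
 *		if (status == DNS_EAI_INPROGRESS && info) {
 *			// one resolved address is available in info->ai_addr
 *		} else if (status == DNS_EAI_ALLDONE) {
 *			// all results have been delivered
 *		} else if (status == DNS_EAI_CANCELED) {
 *			// the query timed out or was cancelled
 *		}
 *	}
 *
 *	uint16_t dns_id;
 *	int err = dns_resolve_name(dns_resolve_get_default(),
 *				   "www.example.com", DNS_QUERY_TYPE_A,
 *				   &dns_id, dns_result_cb, NULL, 2000);
 *
 *	// a pending query can later be cancelled with
 *	// dns_resolve_cancel(dns_resolve_get_default(), dns_id);
 */
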
int dns_resolve_name(struct dns_resolve_context *ctx,
		     const char *query,
		     enum dns_query_type type,
		     uint16_t *dns_id,
		     dns_resolve_cb_t cb,
		     void *user_data,
		     int32_t timeout)
{
	k_timeout_t tout;
	struct net_buf *dns_data = NULL;
	struct net_buf *dns_qname = NULL;
	struct sockaddr addr;
	int ret, i = -1, j = 0;
	int failure = 0;
	bool mdns_query = false;
	uint8_t hop_limit;

	if (!ctx || !query || !cb) {
		return -EINVAL;
	}

	tout = SYS_TIMEOUT_MS(timeout);

	/* The timeout cannot be 0 as we cannot resolve a name that fast.
	 */
	if (K_TIMEOUT_EQ(tout, K_NO_WAIT)) {
		return -EINVAL;
	}

	ret = net_ipaddr_parse(query, strlen(query), &addr);
	if (ret) {
		/* The query name was already in numeric form, so there is no
		 * need to continue further.
		 */
		struct dns_addrinfo info = { 0 };

		if (type == DNS_QUERY_TYPE_A) {
			if (net_sin(&addr)->sin_family == AF_INET6) {
				return -EPFNOSUPPORT;
			}

			memcpy(net_sin(&info.ai_addr), net_sin(&addr),
			       sizeof(struct sockaddr_in));
			info.ai_family = AF_INET;
			info.ai_addr.sa_family = AF_INET;
			info.ai_addrlen = sizeof(struct sockaddr_in);
		} else if (type == DNS_QUERY_TYPE_AAAA) {
			/* We do not support AI_V4MAPPED at the moment, so if
			 * the user asks for an IPv6 address but the value is
			 * an IPv4 one, then return an error. Note that
			 * getaddrinfo() will swap the error to EINVAL; the
			 * EPFNOSUPPORT is returned here so that we can find
			 * it easily.
			 */
			if (net_sin(&addr)->sin_family == AF_INET) {
				return -EPFNOSUPPORT;
			}

#if defined(CONFIG_NET_IPV6)
			memcpy(net_sin6(&info.ai_addr), net_sin6(&addr),
			       sizeof(struct sockaddr_in6));
			info.ai_family = AF_INET6;
			info.ai_addr.sa_family = AF_INET6;
			info.ai_addrlen = sizeof(struct sockaddr_in6);
#else
			return -EAFNOSUPPORT;
#endif
		} else {
			goto try_resolve;
		}

		cb(DNS_EAI_INPROGRESS, &info, user_data);
		cb(DNS_EAI_ALLDONE, NULL, user_data);

		return 0;
	}

try_resolve:
	k_mutex_lock(&ctx->lock, K_FOREVER);

	if (ctx->state != DNS_RESOLVE_CONTEXT_ACTIVE) {
		ret = -EINVAL;
		goto fail;
	}

	i = get_cb_slot(ctx);
	if (i < 0) {
		ret = -EAGAIN;
		goto fail;
	}

	ctx->queries[i].cb = cb;
	ctx->queries[i].timeout = tout;
	ctx->queries[i].query = query;
	ctx->queries[i].query_type = type;
	ctx->queries[i].user_data = user_data;
	ctx->queries[i].ctx = ctx;
	ctx->queries[i].query_hash = 0;

	k_work_init_delayable(&ctx->queries[i].timer, query_timeout);

	dns_data = net_buf_alloc(&dns_msg_pool, ctx->buf_timeout);
	if (!dns_data) {
		ret = -ENOMEM;
		goto quit;
	}

	dns_qname = net_buf_alloc(&dns_qname_pool, ctx->buf_timeout);
	if (!dns_qname) {
		ret = -ENOMEM;
		goto quit;
	}

	ret = dns_msg_pack_qname(&dns_qname->len, dns_qname->data,
				 DNS_MAX_NAME_LEN, ctx->queries[i].query);
	if (ret < 0) {
		goto quit;
	}

	ctx->queries[i].id = sys_rand32_get();

	/* If mDNS is enabled, then send .local queries only to the multicast
	 * address. For mDNS the id should be set to 0, see RFC 6762 ch. 18.1
	 * for details.
	 */
	if (IS_ENABLED(CONFIG_MDNS_RESOLVER)) {
		const char *ptr = strrchr(query, '.');

		/* Note that we memcmp() the \0 here too */
		if (ptr && !memcmp(ptr, (const void *){ ".local" }, 7)) {
			mdns_query = true;

			ctx->queries[i].id = 0;
		}
	}

	/* Do this immediately after calculating the Id so that the unit
	 * test will work properly.
	 */
	if (dns_id) {
		*dns_id = ctx->queries[i].id;

		NET_DBG("DNS id will be %u", *dns_id);
	}

	for (j = 0; j < SERVER_COUNT; j++) {
		hop_limit = 0U;

		if (!ctx->servers[j].net_ctx) {
			continue;
		}

		/* If mDNS is enabled, then send .local queries only to
		 * a well known multicast mDNS server address.
		 */
		if (IS_ENABLED(CONFIG_MDNS_RESOLVER) && mdns_query &&
		    !ctx->servers[j].is_mdns) {
			continue;
		}

		/* If LLMNR is enabled, then all the queries are sent to the
		 * LLMNR multicast address unless this is an mDNS query.
		 */
		if (!mdns_query && IS_ENABLED(CONFIG_LLMNR_RESOLVER)) {
			if (!ctx->servers[j].is_llmnr) {
				continue;
			}

			hop_limit = 1U;
		}

		ret = dns_write(ctx, j, i, dns_data, dns_qname, hop_limit);
		if (ret < 0) {
			failure++;
			continue;
		}

		/* Do one concurrent query only for each name resolve.
		 * TODO: Change the i (query index) to do multiple concurrent
		 * to each server.
		 */
		break;
	}

	if (failure) {
		NET_DBG("DNS query failed %d times", failure);

		if (failure == j) {
			ret = -ENOENT;
			goto quit;
		}
	}

	ret = 0;

quit:
	if (ret < 0) {
		if (i >= 0) {
			release_query(&ctx->queries[i]);
		}

		if (dns_id) {
			*dns_id = 0U;
		}
	}

	if (dns_data) {
		net_buf_unref(dns_data);
	}

	if (dns_qname) {
		net_buf_unref(dns_qname);
	}

fail:
	k_mutex_unlock(&ctx->lock);

	return ret;
}

/* Must be invoked with context lock held */
static int dns_resolve_close_locked(struct dns_resolve_context *ctx)
{
	int i;

	if (ctx->state != DNS_RESOLVE_CONTEXT_ACTIVE) {
		return -ENOENT;
	}

	ctx->state = DNS_RESOLVE_CONTEXT_DEACTIVATING;

	/* ctx->net_ctx is never used in the "deactivating" state.
	 * Additionally, the following code is guaranteed to be executed only
	 * by one thread at a time, due to the required "active" ->
	 * "deactivating" state change. This means that it is safe to put the
	 * net_ctx with the mutex released.
	 *
	 * Releasing the mutex prevents lower networking layers from
	 * deadlocking when calling cb_recv() (which acquires ctx->lock) just
	 * before closing the network context.
	 */
	k_mutex_unlock(&ctx->lock);

	for (i = 0; i < SERVER_COUNT; i++) {
		if (ctx->servers[i].net_ctx) {
			struct net_if *iface;

			iface = net_context_get_iface(ctx->servers[i].net_ctx);

			if (IS_ENABLED(CONFIG_NET_MGMT_EVENT_INFO)) {
				net_mgmt_event_notify_with_info(
					NET_EVENT_DNS_SERVER_DEL,
					iface,
					(void *)&ctx->servers[i].dns_server,
					sizeof(struct sockaddr));
			} else {
				net_mgmt_event_notify(NET_EVENT_DNS_SERVER_DEL,
						      iface);
			}

			net_context_put(ctx->servers[i].net_ctx);
			ctx->servers[i].net_ctx = NULL;
		}
	}

	k_mutex_lock(&ctx->lock, K_FOREVER);

	ctx->state = DNS_RESOLVE_CONTEXT_INACTIVE;

	return 0;
}

int dns_resolve_close(struct dns_resolve_context *ctx)
{
	int ret;

	k_mutex_lock(&ctx->lock, K_FOREVER);
	ret = dns_resolve_close_locked(ctx);
	k_mutex_unlock(&ctx->lock);

	return ret;
}

static bool dns_server_exists(struct dns_resolve_context *ctx,
			      const struct sockaddr *addr)
{
	for (int i = 0; i < SERVER_COUNT; i++) {
		if (IS_ENABLED(CONFIG_NET_IPV4) && (addr->sa_family == AF_INET) &&
		    (ctx->servers[i].dns_server.sa_family == AF_INET)) {
			if (net_ipv4_addr_cmp(&net_sin(addr)->sin_addr,
					      &net_sin(&ctx->servers[i].dns_server)->sin_addr)) {
				return true;
			}
		}

		if (IS_ENABLED(CONFIG_NET_IPV6) && (addr->sa_family == AF_INET6) &&
		    (ctx->servers[i].dns_server.sa_family == AF_INET6)) {
			if (net_ipv6_addr_cmp(&net_sin6(addr)->sin6_addr,
					      &net_sin6(&ctx->servers[i].dns_server)->sin6_addr)) {
				return true;
			}
		}
	}

	return false;
}

static bool dns_servers_exists(struct dns_resolve_context *ctx,
			       const char *servers[],
			       const struct sockaddr *servers_sa[])
{
	if (servers) {
		for (int i = 0; i < SERVER_COUNT && servers[i]; i++) {
			struct sockaddr addr;

			if (!net_ipaddr_parse(servers[i], strlen(servers[i]), &addr)) {
				continue;
			}

			if (!dns_server_exists(ctx, &addr)) {
				return false;
			}
		}
	}

	if (servers_sa) {
		for (int i = 0; i < SERVER_COUNT && servers_sa[i]; i++) {
			if (!dns_server_exists(ctx, servers_sa[i])) {
				return false;
			}
		}
	}

	return true;
}

int dns_resolve_reconfigure(struct dns_resolve_context *ctx,
			    const char *servers[],
			    const struct sockaddr *servers_sa[])
{
	int err;

	if (!ctx) {
		return -ENOENT;
	}

	k_mutex_lock(&ctx->lock, K_FOREVER);

	if (dns_servers_exists(ctx, servers, servers_sa)) {
		/* DNS servers did not change. */
		err = 0;
		goto unlock;
	}

	if (ctx->state == DNS_RESOLVE_CONTEXT_DEACTIVATING) {
		err = -EBUSY;
		goto unlock;
	}

	if (ctx->state == DNS_RESOLVE_CONTEXT_ACTIVE) {
		dns_resolve_cancel_all(ctx);

		err = dns_resolve_close_locked(ctx);
		if (err) {
			goto unlock;
		}
	}

	err = dns_resolve_init_locked(ctx, servers, servers_sa);

unlock:
	k_mutex_unlock(&ctx->lock);

	return err;
}

struct dns_resolve_context *dns_resolve_get_default(void)
{
	return &dns_default_ctx;
}

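/* Illustrative sketch (not part of the original sources): with
 * CONFIG_DNS_SERVER_IP_ADDRESSES enabled, dns_resolve_init_default() below
 * picks up the statically configured servers. An example prj.conf could look
 * like this (the addresses are documentation examples only):
 *
 *	CONFIG_DNS_RESOLVER=y
 *	CONFIG_DNS_SERVER_IP_ADDRESSES=y
 *	CONFIG_DNS_SERVER1="192.0.2.53"
 *	CONFIG_DNS_SERVER2="[2001:db8::53]:53"
 */
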
int dns_resolve_init_default(struct dns_resolve_context *ctx)
{
	int ret = 0;
#if defined(CONFIG_DNS_SERVER_IP_ADDRESSES)
	static const char *dns_servers[SERVER_COUNT + 1];
	int count = DNS_SERVER_COUNT;

	if (count > 5) {
		count = 5;
	}

	switch (count) {
#if DNS_SERVER_COUNT > 4
	case 5:
		dns_servers[4] = CONFIG_DNS_SERVER5;
		__fallthrough;
#endif
#if DNS_SERVER_COUNT > 3
	case 4:
		dns_servers[3] = CONFIG_DNS_SERVER4;
		__fallthrough;
#endif
#if DNS_SERVER_COUNT > 2
	case 3:
		dns_servers[2] = CONFIG_DNS_SERVER3;
		__fallthrough;
#endif
#if DNS_SERVER_COUNT > 1
	case 2:
		dns_servers[1] = CONFIG_DNS_SERVER2;
		__fallthrough;
#endif
#if DNS_SERVER_COUNT > 0
	case 1:
		dns_servers[0] = CONFIG_DNS_SERVER1;
		__fallthrough;
#endif
	case 0:
		break;
	}

#if defined(CONFIG_MDNS_RESOLVER) && (MDNS_SERVER_COUNT > 0)
#if defined(CONFIG_NET_IPV6) && defined(CONFIG_NET_IPV4)
	dns_servers[DNS_SERVER_COUNT + 1] = MDNS_IPV6_ADDR;
	dns_servers[DNS_SERVER_COUNT] = MDNS_IPV4_ADDR;
#else /* CONFIG_NET_IPV6 && CONFIG_NET_IPV4 */
#if defined(CONFIG_NET_IPV6)
	dns_servers[DNS_SERVER_COUNT] = MDNS_IPV6_ADDR;
#endif
#if defined(CONFIG_NET_IPV4)
	dns_servers[DNS_SERVER_COUNT] = MDNS_IPV4_ADDR;
#endif
#endif /* CONFIG_NET_IPV6 && CONFIG_NET_IPV4 */
#endif /* MDNS_RESOLVER && MDNS_SERVER_COUNT > 0 */

#if defined(CONFIG_LLMNR_RESOLVER) && (LLMNR_SERVER_COUNT > 0)
#if defined(CONFIG_NET_IPV6) && defined(CONFIG_NET_IPV4)
	dns_servers[DNS_SERVER_COUNT + MDNS_SERVER_COUNT + 1] =
							LLMNR_IPV6_ADDR;
	dns_servers[DNS_SERVER_COUNT + MDNS_SERVER_COUNT] = LLMNR_IPV4_ADDR;
#else /* CONFIG_NET_IPV6 && CONFIG_NET_IPV4 */
#if defined(CONFIG_NET_IPV6)
	dns_servers[DNS_SERVER_COUNT + MDNS_SERVER_COUNT] = LLMNR_IPV6_ADDR;
#endif
#if defined(CONFIG_NET_IPV4)
	dns_servers[DNS_SERVER_COUNT + MDNS_SERVER_COUNT] = LLMNR_IPV4_ADDR;
#endif
#endif /* CONFIG_NET_IPV6 && CONFIG_NET_IPV4 */
#endif /* LLMNR_RESOLVER && LLMNR_SERVER_COUNT > 0 */

	dns_servers[SERVER_COUNT] = NULL;

	ret = dns_resolve_init(ctx, dns_servers, NULL);
	if (ret < 0) {
		NET_WARN("Cannot initialize DNS resolver (%d)", ret);
	}
#else
	/* We must always call init even if there are no servers configured so
	 * that the DNS mutex gets initialized properly.
	 */
	(void)dns_resolve_init(dns_resolve_get_default(), NULL, NULL);
#endif
	return ret;
}

#ifdef CONFIG_DNS_RESOLVER_AUTO_INIT
void dns_init_resolver(void)
{
	dns_resolve_init_default(dns_resolve_get_default());
}
#endif /* CONFIG_DNS_RESOLVER_AUTO_INIT */