#!/usr/bin/env python3
#
#  Copyright (c) 2019, The OpenThread Authors.
#  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#  1. Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#  2. Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#  3. Neither the name of the copyright holder nor the
#     names of its contributors may be used to endorse or promote products
#     derived from this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
#  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#  POSSIBILITY OF SUCH DAMAGE.
#

import binascii
import json
import logging
import os
import signal
import stat
import subprocess
import sys
import time
import traceback
import unittest
from typing import Optional, Callable, Union, Mapping, Any

import config
import debug
from node import Node, OtbrNode, HostNode
from pktverify import utils as pvutils

PACKET_VERIFICATION = int(os.getenv('PACKET_VERIFICATION', 0))
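# Non-zero enables pcap-based packet verification. The value selects the
# verification mode: it is compared against each test case's
# PACKET_VERIFICATION class attribute (e.g. config.PACKET_VERIFICATION_DEFAULT)
# before the case's verify() hook is run.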

if PACKET_VERIFICATION:
    from pktverify.addrs import ExtAddr, EthAddr
    from pktverify.packet_verifier import PacketVerifier

PORT_OFFSET = int(os.getenv('PORT_OFFSET', '0'))

ENV_THREAD_VERSION = os.getenv('THREAD_VERSION', '1.1')

DEFAULT_PARAMS = {
    'is_mtd': False,
    'is_ftd': False,
    'is_bbr': False,
    'is_otbr': False,
    'is_host': False,
    'mode': 'rdn',
    'allowlist': None,
    'version': ENV_THREAD_VERSION,
}
"""Default configurations when creating nodes."""

FTD_DEFAULT_PARAMS = {
    'is_ftd': True,
    'router_selection_jitter': config.DEFAULT_ROUTER_SELECTION_JITTER,
}

EXTENDED_ADDRESS_BASE = 0x166e0a0000000000
"""Extended address base to keep U/L bit 1. The value is borrowed from Thread Test Harness."""


class NcpSupportMixin:
    """Mixin that skips a test case under ncp-sim when the case does not support NCP.
    """

    SUPPORT_NCP = True

    def __init__(self, *args, **kwargs):
        if os.getenv('NODE_TYPE', 'sim') == 'ncp-sim' and not self.SUPPORT_NCP:
            # Exit code 77 tells the Automake test harness to mark this test case as skipped.
            sys.exit(77)

        super().__init__(*args, **kwargs)


class TestCase(NcpSupportMixin, unittest.TestCase):
    """The base class for all Thread certification test cases.

    The `TOPOLOGY` member of a subclass is used to create the test topology.
    """

    USE_MESSAGE_FACTORY = True
    TOPOLOGY = None
    CASE_WIRESHARK_PREFS = None
    SUPPORT_THREAD_1_1 = True
    PACKET_VERIFICATION = config.PACKET_VERIFICATION_DEFAULT

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')

        self._start_time = None
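        # Packet verification runs only when the PACKET_VERIFICATION env knob
        # is non-zero, the test case implements verify(), and the case's
        # verification mode matches the env value.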
        self._do_packet_verification = PACKET_VERIFICATION and hasattr(self, 'verify') \
                                       and self.PACKET_VERIFICATION == PACKET_VERIFICATION

        # Store all the backbone network names used in the test case. The list
        # stays empty when the test has no backbone traffic (no OTBR or host nodes).
        self._backbone_network_names = []

    def skipTest(self, reason: Any) -> None:
        self._testSkipped = True
        super().skipTest(reason)

    def setUp(self):
        self._testSkipped = False

        if ENV_THREAD_VERSION == '1.1' and not self.SUPPORT_THREAD_1_1:
            self.skipTest('Thread 1.1 not supported.')

        try:
            self._setUp()
        except:
            traceback.print_exc()
            for node in list(self.nodes.values()):
                try:
                    node.destroy()
                except Exception:
                    traceback.print_exc()

            raise

    def _setUp(self):
        """Create simulator, nodes and apply configurations.
        """
        self._clean_up_tmp()

        self.simulator = config.create_default_simulator(use_message_factory=self.USE_MESSAGE_FACTORY)
        self.nodes = {}

        os.environ['LD_LIBRARY_PATH'] = '/tmp/thread-wireshark'

        if self._has_backbone_traffic():
            self._prepare_backbone_network()
            self._start_backbone_sniffer()

        self._initial_topology = initial_topology = {}

        for i, params in self.TOPOLOGY.items():
            params = self._parse_params(params)
            initial_topology[i] = params

            backbone_network_name = self._construct_backbone_network_name(params.get('backbone_network_id')) \
                                    if self._has_backbone_traffic() else None

            logging.info("Creating node %d: %r", i, params)
            logging.info("Backbone network: %s", backbone_network_name)

            if params['is_otbr']:
                nodeclass = OtbrNode
            elif params['is_host']:
                nodeclass = HostNode
            else:
                nodeclass = Node

            node = nodeclass(i,
                             is_mtd=params['is_mtd'],
                             simulator=self.simulator,
                             name=params.get('name'),
                             version=params['version'],
                             is_bbr=params['is_bbr'],
                             backbone_network=backbone_network_name)
            if 'boot_delay' in params:
                self.simulator.go(params['boot_delay'])

            self.nodes[i] = node

            if node.is_host:
                continue

            self.nodes[i].set_mode(params['mode'])

            if 'partition_id' in params:
                self.nodes[i].set_preferred_partition_id(params['partition_id'])

            if params['is_ftd']:
                self.nodes[i].set_router_selection_jitter(params['router_selection_jitter'])

            if 'router_upgrade_threshold' in params:
                self.nodes[i].set_router_upgrade_threshold(params['router_upgrade_threshold'])
            if 'router_downgrade_threshold' in params:
                self.nodes[i].set_router_downgrade_threshold(params['router_downgrade_threshold'])
            if 'router_eligible' in params:
                self.nodes[i].set_router_eligible(params['router_eligible'])
            if 'prefer_router_id' in params:
                self.nodes[i].prefer_router_id(params['prefer_router_id'])

            if 'timeout' in params:
                self.nodes[i].set_timeout(params['timeout'])

            self._set_up_active_dataset(self.nodes[i], params)

            if 'pending_dataset' in params:
                self.nodes[i].set_pending_dataset(params['pending_dataset']['pendingtimestamp'],
                                                  params['pending_dataset']['activetimestamp'],
                                                  panid=params['pending_dataset'].get('panid'),
                                                  channel=params['pending_dataset'].get('channel'),
                                                  delay=params['pending_dataset'].get('delay'))

            if 'key_sequence_counter' in params:
                self.nodes[i].set_key_sequence_counter(params['key_sequence_counter'])

            if 'network_id_timeout' in params:
                self.nodes[i].set_network_id_timeout(params['network_id_timeout'])

            if 'context_reuse_delay' in params:
                self.nodes[i].set_context_reuse_delay(params['context_reuse_delay'])

            if 'max_children' in params:
                self.nodes[i].set_max_children(params['max_children'])

            if 'bbr_registration_jitter' in params:
                self.nodes[i].set_bbr_registration_jitter(params['bbr_registration_jitter'])

            if 'router_id_range' in params:
                self.nodes[i].set_router_id_range(params['router_id_range'][0], params['router_id_range'][1])

        # Allowlists can only be configured after all nodes are created.
        for i, params in initial_topology.items():
            allowlist = params['allowlist']
            if allowlist is None:
                continue

            for j in allowlist:
                rssi = None
                if isinstance(j, tuple):
                    j, rssi = j
                self.nodes[i].add_allowlist(self.nodes[j].get_addr64(), rssi=rssi)
            self.nodes[i].enable_allowlist()

        self._inspector = debug.Inspector(self)
        self._collect_test_info_after_setup()

    def _set_up_active_dataset(self, node, params):
        dataset = {
            'timestamp': 1,
            'channel': config.CHANNEL,
            'channel_mask': config.CHANNEL_MASK,
            'extended_panid': config.EXTENDED_PANID,
            'mesh_local_prefix': config.MESH_LOCAL_PREFIX.split('/')[0],
            'network_key': binascii.hexlify(config.DEFAULT_NETWORK_KEY).decode(),
            'network_name': config.NETWORK_NAME,
            'panid': config.PANID,
            'pskc': config.PSKC,
            'security_policy': config.SECURITY_POLICY,
        }

        if 'channel' in params:
            dataset['channel'] = params['channel']
        if 'networkkey' in params:
            dataset['network_key'] = params['networkkey']
        if 'network_name' in params:
            dataset['network_name'] = params['network_name']
        if 'panid' in params:
            dataset['panid'] = params['panid']

        if 'active_dataset' in params:
            dataset.update(params['active_dataset'])

        node.set_active_dataset(**dataset)
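        # Example (illustrative): a TOPOLOGY entry such as
        #     1: {'channel': 12, 'networkkey': '00112233445566778899aabbccddeeff'}
        # overrides the matching fields of the default dataset above, and an
        # explicit 'active_dataset' dict takes precedence over both.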

    def inspect(self):
        self._inspector.inspect()

    def tearDown(self):
        """Destroy nodes and simulator.
        """
        if self._do_packet_verification and os.uname().sysname != "Linux":
            raise NotImplementedError(
                f'{self.test_name}: Packet Verification not available on {os.uname().sysname} (Linux only).')

        if self._do_packet_verification:
            self.simulator.go(3)

        if self._has_backbone_traffic():
            # Stop the backbone sniffer before stopping nodes so that we don't
            # capture Codecov upload traffic.
            self._stop_backbone_sniffer()

        for node in list(self.nodes.values()):
            try:
                node.stop()
            except Exception:
                traceback.print_exc()
            finally:
                node.destroy()

        self.simulator.stop()

        if self._has_backbone_traffic():
            self._remove_backbone_network()

        if self._do_packet_verification:
            if self._has_backbone_traffic():
                pcap_filename = self._merge_thread_backbone_pcaps()
            else:
                pcap_filename = self._get_thread_pcap_filename()

            self._test_info['pcap'] = pcap_filename

            test_info_path = self._output_test_info()
            if not self._testSkipped:
                self._verify_packets(test_info_path)

    def flush_all(self):
        """Flush away all captured messages of all nodes.
        """
        for i in list(self.nodes.keys()):
            self.simulator.get_messages_sent_by(i)

    def flush_nodes(self, nodes):
        """Flush away all captured messages of specified nodes.

        Args:
            nodes (list): nodes whose messages to flush.

        """
        for i in nodes:
            if i in self.nodes:
                self.simulator.get_messages_sent_by(i)

    def _clean_up_tmp(self):
        """
        Clean up node files in the tmp directory.
        """
        os.system(f"rm -f tmp/{PORT_OFFSET}_*.flash tmp/{PORT_OFFSET}_*.data tmp/{PORT_OFFSET}_*.swap")

    def _verify_packets(self, test_info_path: str):
        pv = PacketVerifier(test_info_path, self.CASE_WIRESHARK_PREFS)
        pv.add_common_vars()
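        # The capture must not contain any ICMPv6 packets that are not allowed
        # in Thread networks; fail verification if one is found.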
        pv.pkts.filter_thread_unallowed_icmpv6().must_not_next()
        self.verify(pv)
        print("Packet verification passed: %s" % test_info_path, file=sys.stderr)

    @property
    def test_name(self):
        return os.getenv('TEST_NAME', 'current')

    def collect_ipaddrs(self):
        if not self._do_packet_verification:
            return

        test_info = self._test_info

        for i, node in self.nodes.items():
            ipaddrs = node.get_addrs()

            if hasattr(node, 'get_ether_addrs'):
                ipaddrs += node.get_ether_addrs()

            test_info['ipaddrs'][i] = ipaddrs
            if not node.is_host:
                mleid = node.get_mleid()
                test_info['mleids'][i] = mleid

    def collect_rloc16s(self):
        if not self._do_packet_verification:
            return

        test_info = self._test_info
        test_info['rloc16s'] = {}

        for i, node in self.nodes.items():
            if not node.is_host:
                test_info['rloc16s'][i] = '0x%04x' % node.get_addr16()

    def collect_rlocs(self):
        if not self._do_packet_verification:
            return

        test_info = self._test_info
        test_info['rlocs'] = {}

        for i, node in self.nodes.items():
            if node.is_host:
                continue

            test_info['rlocs'][i] = node.get_rloc()

    def collect_omrs(self):
        if not self._do_packet_verification:
            return

        test_info = self._test_info
        test_info['omrs'] = {}

        for i, node in self.nodes.items():
            if node.is_host:
                continue

            test_info['omrs'][i] = node.get_ip6_address(config.ADDRESS_TYPE.OMR)

    def collect_duas(self):
        if not self._do_packet_verification:
            return

        test_info = self._test_info
        test_info['duas'] = {}

        for i, node in self.nodes.items():
            if node.is_host:
                continue

            test_info['duas'][i] = node.get_ip6_address(config.ADDRESS_TYPE.DUA)

    def collect_leader_aloc(self, node):
        if not self._do_packet_verification:
            return

        test_info = self._test_info
        test_info['leader_aloc'] = self.nodes[node].get_addr_leader_aloc()

    def collect_extra_vars(self, **vars):
        if not self._do_packet_verification:
            return

        for k in vars.keys():
            assert isinstance(k, str), k

        test_vars = self._test_info.setdefault("extra_vars", {})
        test_vars.update(vars)
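        # Usage (illustrative): self.collect_extra_vars(DUT_EXTADDR=extaddr)
        # exposes DUT_EXTADDR to the packet verification script via the
        # generated test info JSON.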

    def _collect_test_info_after_setup(self):
        """
        Collect test info after setUp.
        """
        if not self._do_packet_verification:
            return

        test_info = self._test_info = {
            'script': os.path.abspath(sys.argv[0]),
            'testcase': self.test_name,
            'start_time': time.ctime(self._start_time),
            'pcap': '',
            'extaddrs': {},
            'ethaddrs': {},
            'ipaddrs': {},
            'mleids': {},
            'topology': self._initial_topology,
            'backbone': {
                'interface': config.BACKBONE_DOCKER_NETWORK_NAME,
                'prefix': config.BACKBONE_PREFIX,
            },
            'domain_prefix': config.DOMAIN_PREFIX,
            'env': {
                'PORT_OFFSET': config.PORT_OFFSET,
            },
        }

        for i, node in self.nodes.items():
            if not node.is_host:
                extaddr = node.get_addr64()
                test_info['extaddrs'][i] = ExtAddr(extaddr).format_octets()

            if node.is_host or node.is_otbr:
                ethaddr = node.get_ether_mac()
                test_info['ethaddrs'][i] = EthAddr(ethaddr).format_octets()

    def _construct_backbone_network_name(self, backbone_network_id) -> str:
        """
        Construct the name of the backbone network based on the given backbone network id from TOPOLOGY. If the
        backbone_network_id is not defined in TOPOLOGY, use the default backbone network id.
        """
        id = backbone_network_id if backbone_network_id is not None else config.BACKBONE_DOCKER_NETWORK_DEFAULT_ID
        backbone_name = f'{config.BACKBONE_DOCKER_NETWORK_NAME}.{id}'

        assert backbone_name in self._backbone_network_names

        return backbone_name

    def _output_test_info(self):
        """
        Output test info to a JSON file during tearDown.
        """
        filename = f'{self.test_name}.json'
        with open(filename, 'wt') as ofd:
            ofd.write(json.dumps(self._test_info, indent=1, sort_keys=True))

        return filename

    def _get_thread_pcap_filename(self):
        current_pcap = self.test_name + '.pcap'
        return os.path.abspath(current_pcap)

    def assure_run_ok(self, cmd, shell=False):
        if not shell and isinstance(cmd, str):
            cmd = cmd.split()
        proc = subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=shell)
        print(">>> %s => %d" % (cmd, proc.returncode), file=sys.stderr)
        proc.check_returncode()

    def _parse_params(self, params: Optional[dict]) -> dict:
        params = params or {}

        if params.get('is_bbr') or params.get('is_otbr'):
            # BBRs must not use Thread version 1.1.
            version = params.get('version', '1.4')
            assert version != '1.1', params
            params['version'] = version
            params.setdefault('bbr_registration_jitter', config.DEFAULT_BBR_REGISTRATION_JITTER)
        elif params.get('is_host'):
            # Hosts must not specify a Thread version.
            assert params.get('version', '') == '', params
            params['version'] = ''

        # Use 1.4 nodes for 1.2 tests.
        if params.get('version') == '1.2':
            params['version'] = '1.4'

        is_ftd = (not params.get('is_mtd') and not params.get('is_host'))

        effective_params = DEFAULT_PARAMS.copy()

        if is_ftd:
            effective_params.update(FTD_DEFAULT_PARAMS)

        effective_params.update(params)

        return effective_params
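        # Example (illustrative): _parse_params({'is_otbr': True}) returns
        # DEFAULT_PARAMS merged with FTD_DEFAULT_PARAMS plus
        # {'is_otbr': True, 'version': '1.4',
        #  'bbr_registration_jitter': config.DEFAULT_BBR_REGISTRATION_JITTER},
        # since an OTBR is an FTD and must run a version newer than 1.1.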

    def _has_backbone_traffic(self):
        for param in self.TOPOLOGY.values():
            if param and (param.get('is_otbr') or param.get('is_host')):
                return True

        return False

    def _prepare_backbone_network(self):
        """
        Creates one or more backbone networks (Docker bridge networks) based on the TOPOLOGY definition.

        * If `backbone_network_id` is defined in the TOPOLOGY:
            * Network name:   `backbone{PORT_OFFSET}.{backbone_network_id}`      (e.g., "backbone0.0", "backbone0.1")
            * Network prefix: `backbone{PORT_OFFSET}:{backbone_network_id}::/64` (e.g., "9100:0::/64", "9100:1::/64")

        * If `backbone_network_id` is undefined:
            * Network name:   `backbone{PORT_OFFSET}.0`    (e.g., "backbone0.0")
            * Network prefix: `backbone{PORT_OFFSET}::/64` (e.g., "9100::/64")
        """
        # Parse TOPOLOGY and collect all backbone network ids into backbone_id_set.
        backbone_id_set = set()
        for node in self.TOPOLOGY:
            params = self.TOPOLOGY[node] or {}
            id = params.get('backbone_network_id')
            if id is not None:
                backbone_id_set.add(id)

        # Fall back to the default backbone network id if backbone_id_set is empty.
        if not backbone_id_set:
            backbone_id_set.add(config.BACKBONE_DOCKER_NETWORK_DEFAULT_ID)

        # Iterate over backbone_id_set and create the backbone network(s).
        for id in backbone_id_set:
            backbone = f'{config.BACKBONE_DOCKER_NETWORK_NAME}.{id}'
            backbone_prefix = f'{config.BACKBONE_IPV6_ADDR_START}:{id}::/64'
            self._backbone_network_names.append(backbone)
            self.assure_run_ok(
                f'docker network create --driver bridge --ipv6 --subnet {backbone_prefix} -o "com.docker.network.bridge.name"="{backbone}" {backbone} || true',
                shell=True)

    def _remove_backbone_network(self):
        for network_name in self._backbone_network_names:
            self.assure_run_ok(f'docker network rm {network_name}', shell=True)

    def _start_backbone_sniffer(self):
        assert self._backbone_network_names, 'Internal Error: self._backbone_network_names is empty'
        # TODO: support sniffing on multiple backbone networks
        sniffer_interface = self._backbone_network_names[0]

        # Remove any stale pcap left over from a previous run so that dumpcap
        # starts with a fresh file.
        pcap_file = self._get_backbone_pcap_filename()
        try:
            os.remove(pcap_file)
        except FileNotFoundError:
            pass

        dumpcap = pvutils.which_dumpcap()
        self._dumpcap_proc = subprocess.Popen([dumpcap, '-i', sniffer_interface, '-w', pcap_file],
                                              stdout=sys.stdout,
                                              stderr=sys.stderr)
        time.sleep(0.2)
        assert self._dumpcap_proc.poll() is None, 'dumpcap terminated unexpectedly'
        logging.info('Backbone sniffer launched successfully on interface %s, pid=%s', sniffer_interface,
                     self._dumpcap_proc.pid)
        os.chmod(pcap_file, stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)

    def _get_backbone_pcap_filename(self):
        backbone_pcap = self.test_name + '_backbone.pcap'
        return os.path.abspath(backbone_pcap)

    def _get_merged_pcap_filename(self):
        merged_pcap = self.test_name + '_merged.pcap'
        return os.path.abspath(merged_pcap)

    def _stop_backbone_sniffer(self):
        self._dumpcap_proc.send_signal(signal.SIGTERM)
        self._dumpcap_proc.wait()
        logging.info('Backbone sniffer terminated successfully: pid=%s', self._dumpcap_proc.pid)

    def _merge_thread_backbone_pcaps(self):
        thread_pcap = self._get_thread_pcap_filename()
        backbone_pcap = self._get_backbone_pcap_filename()
        merged_pcap = self._get_merged_pcap_filename()

        mergecap = pvutils.which_mergecap()
        self.assure_run_ok(f'{mergecap} -w {merged_pcap} {thread_pcap} {backbone_pcap}', shell=True)
        return merged_pcap

    def wait_until(self, cond: Callable[[], bool], timeout: int, go_interval: int = 1):
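        # Usage (illustrative):
        #     self.wait_until(lambda: self.nodes[2].get_state() == 'router', timeout=30)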
        remaining = timeout
        while True:
            self.simulator.go(go_interval)

            if cond():
                break

            remaining -= go_interval
            if remaining <= 0:
                raise RuntimeError(f'wait failed after {timeout} seconds')

    def wait_node_state(self, node: Union[int, Node], state: str, timeout: int):
        node = self.nodes[node] if isinstance(node, int) else node
        self.wait_until(lambda: node.get_state() == state, timeout)

    def wait_route_established(self, node1: int, node2: int, timeout=10):
        node2_addr = self.nodes[node2].get_ip6_address(config.ADDRESS_TYPE.RLOC)

        while timeout > 0:
            if self.nodes[node1].ping(node2_addr):
                break

            self.simulator.go(1)
            timeout -= 1
        else:
            raise Exception("Route between node %d and %d is not established" % (node1, node2))

    def assertDictIncludes(self, actual: Mapping[str, str], expected: Mapping[str, str]):
        """Asserts that the `actual` dict includes the `expected` dict.

        Args:
            actual: The dict to check.
            expected: The expected items that the actual dict should contain.
        """
        for k, v in expected.items():
            if k not in actual:
                raise AssertionError(f"key {k!r} is not found in the actual dict")
            if v != actual[k]:
                raise AssertionError(f"{actual[k]!r} != {v!r} for key {k!r}")