Lines Matching +full:data +full:-mirror (kernel HMM selftest, hmm-tests.c)
1 // SPDX-License-Identifier: GPL-2.0
4 * the linux kernel to help device drivers mirror a process address space in
6 * makes communication and data exchange a lot easier.
39 void *mirror; member
58 #define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
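The ALIGN() macro just above rounds a size up to the next multiple of a power-of-two alignment; the tests below use it to turn HMM_BUFFER_SIZE into a whole number of pages before shifting by page_shift. A minimal standalone sketch of that arithmetic (the 4 KiB page size and the 10000-byte buffer are assumed values for illustration):

#include <assert.h>
#include <stddef.h>

/* Round x up to the next multiple of a; a must be a power of two. */
#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))

int main(void)
{
	size_t page_size = 4096;	/* assumed 4 KiB pages */
	size_t page_shift = 12;
	size_t buffer_size = 10000;	/* stand-in for HMM_BUFFER_SIZE */

	/* 10000 bytes rounds up to 12288 bytes, i.e. 3 whole pages. */
	size_t npages = ALIGN(buffer_size, page_size) >> page_shift;
	size_t size = npages << page_shift;

	assert(npages == 3);
	assert(size == 12288);
	return 0;
}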
131 self->page_size = sysconf(_SC_PAGE_SIZE); in FIXTURE_SETUP()
132 self->page_shift = ffs(self->page_size) - 1; in FIXTURE_SETUP()
134 self->fd = hmm_open(variant->device_number); in FIXTURE_SETUP()
135 if (self->fd < 0 && hmm_is_coherent_type(variant->device_number)) in FIXTURE_SETUP()
137 ASSERT_GE(self->fd, 0); in FIXTURE_SETUP()
142 self->page_size = sysconf(_SC_PAGE_SIZE); in FIXTURE_SETUP()
143 self->page_shift = ffs(self->page_size) - 1; in FIXTURE_SETUP()
145 self->fd0 = hmm_open(variant->device_number0); in FIXTURE_SETUP()
146 if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0)) in FIXTURE_SETUP()
148 ASSERT_GE(self->fd0, 0); in FIXTURE_SETUP()
149 self->fd1 = hmm_open(variant->device_number1); in FIXTURE_SETUP()
150 ASSERT_GE(self->fd1, 0); in FIXTURE_SETUP()
155 int ret = close(self->fd); in FIXTURE_TEARDOWN()
158 self->fd = -1; in FIXTURE_TEARDOWN()
163 int ret = close(self->fd0); in FIXTURE_TEARDOWN()
166 self->fd0 = -1; in FIXTURE_TEARDOWN()
168 ret = close(self->fd1); in FIXTURE_TEARDOWN()
170 self->fd1 = -1; in FIXTURE_TEARDOWN()
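The fixtures above call hmm_open() with a device number and close the resulting descriptors in teardown. A hedged sketch of what such an open helper looks like, assuming the dmirror test module (CONFIG_TEST_HMM) exposes one character device per fake device as /dev/hmm_dmirror<N>:

#include <fcntl.h>
#include <stdio.h>

/* Open dmirror device <unit>; returns an fd, or a negative value on error. */
static int hmm_open(int unit)
{
	char pathname[64];

	snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
	return open(pathname, O_RDWR, 0);
}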
182 cmd.addr = (__u64)buffer->ptr; in hmm_dmirror_cmd()
183 cmd.ptr = (__u64)buffer->mirror; in hmm_dmirror_cmd()
192 return -errno; in hmm_dmirror_cmd()
194 buffer->cpages = cmd.cpages; in hmm_dmirror_cmd()
195 buffer->faults = cmd.faults; in hmm_dmirror_cmd()
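The cmd.addr, cmd.ptr, cpages and faults lines above come from hmm_dmirror_cmd(), the single ioctl wrapper every test goes through. Below is a hedged reconstruction built around those lines; the struct hmm_buffer layout is inferred from the fields used throughout this listing, while the npages field, the EINTR retry loop and the UAPI header path are assumptions:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#include "test_hmm_uapi.h"	/* struct hmm_dmirror_cmd, HMM_DMIRROR_* requests */

struct hmm_buffer {
	void		*ptr;		/* CPU mapping under test */
	void		*mirror;	/* device-side copy of the data */
	unsigned long	size;
	int		fd;
	uint64_t	cpages;		/* pages the device touched */
	uint64_t	faults;		/* faults the device took */
};

/* Issue one dmirror request (read, write, migrate, snapshot, ...). */
static int hmm_dmirror_cmd(int fd, unsigned long request,
			   struct hmm_buffer *buffer, unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;
	int ret;

	/* Tell the device which CPU range to act on and where to mirror it. */
	cmd.addr = (__u64)buffer->ptr;
	cmd.ptr = (__u64)buffer->mirror;
	cmd.npages = npages;

	for (;;) {
		ret = ioctl(fd, request, &cmd);
		if (ret == 0)
			break;
		if (errno == EINTR)
			continue;
		return -errno;
	}

	/* Report back how much work the device actually did. */
	buffer->cpages = cmd.cpages;
	buffer->faults = cmd.faults;
	return 0;
}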
205 if (buffer->ptr) in hmm_buffer_free()
206 munmap(buffer->ptr, buffer->size); in hmm_buffer_free()
207 free(buffer->mirror); in hmm_buffer_free()
226 } while (r == -1 && errno == EINTR); in hmm_create_file()
231 return -1; in hmm_create_file()
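The do/while and return -1 above belong to hmm_create_file(), which the file-backed tests use for their mappings. A small hedged sketch of the retry-on-EINTR idiom it shows; that the retried call is ftruncate() on an O_TMPFILE descriptor is an assumption, not a quote of the selftest:

#define _GNU_SOURCE		/* O_TMPFILE */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Create an unlinked temporary file of the given size, or return -1. */
static int create_backing_file(unsigned long size)
{
	int fd = open("/tmp", O_TMPFILE | O_EXCL | O_RDWR, 0600);

	if (fd >= 0) {
		int r;

		do {
			r = ftruncate(fd, size);
		} while (r == -1 && errno == EINTR);	/* retry interrupted call */
		if (r == 0)
			return fd;
		close(fd);
	}
	return -1;
}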
239 static int fd = -1; in hmm_random()
297 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
299 size = npages << self->page_shift; in TEST_F()
304 buffer->fd = -1; in TEST_F()
305 buffer->size = size; in TEST_F()
306 buffer->mirror = malloc(size); in TEST_F()
307 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
309 buffer->ptr = mmap(NULL, size, in TEST_F()
312 buffer->fd, 0); in TEST_F()
313 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
319 i = 2 * self->page_size / sizeof(*ptr); in TEST_F()
320 for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
323 /* Set buffer permission to read-only. */ in TEST_F()
324 ret = mprotect(buffer->ptr, size, PROT_READ); in TEST_F()
328 val = *(int *)(buffer->ptr + self->page_size); in TEST_F()
332 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages); in TEST_F()
334 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
335 ASSERT_EQ(buffer->faults, 1); in TEST_F()
338 ptr = buffer->mirror; in TEST_F()
339 for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i) in TEST_F()
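The block above is the first full test body in the listing: mmap anonymous memory, fill it, have the device read it through its mirror, then compare. Nearly every later test repeats this skeleton with a different request. Here is a condensed standalone sketch of that flow, without the kselftest harness, reusing struct hmm_buffer and hmm_dmirror_cmd() from the sketch above (the /dev/hmm_dmirror0 node and the HMM_DMIRROR_READ request come from the dmirror test module; the 16-page size is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Reuses struct hmm_buffer and hmm_dmirror_cmd() from the earlier sketch. */

int main(void)
{
	unsigned long page_size = sysconf(_SC_PAGE_SIZE);
	unsigned long npages = 16;		/* arbitrary test size */
	unsigned long size = npages * page_size;
	struct hmm_buffer buffer = { .fd = -1, .size = size };
	unsigned long i;
	int *ptr;

	/* Device node created for the test_hmm ("dmirror") kernel module. */
	int fd = open("/dev/hmm_dmirror0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/hmm_dmirror0");
		return 1;
	}

	buffer.mirror = malloc(size);
	buffer.ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, buffer.fd, 0);
	if (!buffer.mirror || buffer.ptr == MAP_FAILED)
		return 1;

	/* Fill the CPU mapping so the device has something to read. */
	for (i = 0, ptr = buffer.ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* The device faults the range in and copies it into buffer.mirror. */
	if (hmm_dmirror_cmd(fd, HMM_DMIRROR_READ, &buffer, npages))
		return 1;

	/* The mirror must now match what the CPU wrote. */
	if (memcmp(buffer.ptr, buffer.mirror, size) != 0) {
		fprintf(stderr, "mirror mismatch\n");
		return 1;
	}

	munmap(buffer.ptr, size);
	free(buffer.mirror);
	close(fd);
	return 0;
}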
360 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
362 size = npages << self->page_shift; in TEST_F()
367 buffer->fd = -1; in TEST_F()
368 buffer->size = size; in TEST_F()
369 buffer->mirror = malloc(size); in TEST_F()
370 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
372 buffer->ptr = mmap(NULL, size, in TEST_F()
375 buffer->fd, 0); in TEST_F()
376 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
379 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
382 /* Initialize mirror buffer so we can verify it isn't written. */ in TEST_F()
383 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
384 ptr[i] = -i; in TEST_F()
387 ret = mprotect(buffer->ptr, size, PROT_NONE); in TEST_F()
391 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages); in TEST_F()
392 ASSERT_EQ(ret, -EFAULT); in TEST_F()
395 ret = mprotect(buffer->ptr, size, PROT_READ); in TEST_F()
397 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
401 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
402 ASSERT_EQ(ptr[i], -i); in TEST_F()
419 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
421 size = npages << self->page_shift; in TEST_F()
426 buffer->fd = -1; in TEST_F()
427 buffer->size = size; in TEST_F()
428 buffer->mirror = malloc(size); in TEST_F()
429 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
431 buffer->ptr = mmap(NULL, size, in TEST_F()
434 buffer->fd, 0); in TEST_F()
435 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
437 /* Initialize data that the device will write to buffer->ptr. */ in TEST_F()
438 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
442 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages); in TEST_F()
444 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
445 ASSERT_EQ(buffer->faults, 1); in TEST_F()
448 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
467 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
469 size = npages << self->page_shift; in TEST_F()
474 buffer->fd = -1; in TEST_F()
475 buffer->size = size; in TEST_F()
476 buffer->mirror = malloc(size); in TEST_F()
477 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
479 buffer->ptr = mmap(NULL, size, in TEST_F()
482 buffer->fd, 0); in TEST_F()
483 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
486 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1); in TEST_F()
488 ASSERT_EQ(buffer->cpages, 1); in TEST_F()
489 ASSERT_EQ(buffer->faults, 1); in TEST_F()
491 /* Initialize data that the device will write to buffer->ptr. */ in TEST_F()
492 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
496 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages); in TEST_F()
497 ASSERT_EQ(ret, -EPERM); in TEST_F()
500 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
504 ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ); in TEST_F()
508 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages); in TEST_F()
510 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
511 ASSERT_EQ(buffer->faults, 1); in TEST_F()
514 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
522 * will copy-on-write if a child process inherits the mapping.
535 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
537 size = npages << self->page_shift; in TEST_F()
542 buffer->fd = -1; in TEST_F()
543 buffer->size = size; in TEST_F()
544 buffer->mirror = malloc(size); in TEST_F()
545 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
547 buffer->ptr = mmap(NULL, size, in TEST_F()
550 buffer->fd, 0); in TEST_F()
551 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
553 /* Initialize buffer->ptr so we can tell if it is written. */ in TEST_F()
554 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
557 /* Initialize data that the device will write to buffer->ptr. */ in TEST_F()
558 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
559 ptr[i] = -i; in TEST_F()
562 if (pid == -1) in TEST_F()
569 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
575 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
577 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
578 ASSERT_EQ(ptr[i], -i); in TEST_F()
580 /* The child process needs its own mirror to its own mm. */ in TEST_F()
587 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
588 ASSERT_EQ(buffer->faults, 1); in TEST_F()
591 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
592 ASSERT_EQ(ptr[i], -i); in TEST_F()
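The test above forks and expects the child's view of the private mapping to diverge from the parent's, as its leading comment says ("will copy-on-write if a child process inherits the mapping"), which is also why the child must open its own mirror of its own mm. A minimal standalone demonstration of just that copy-on-write behaviour, independent of the selftest:

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	int *p = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	pid_t pid;

	assert(p != MAP_FAILED);
	*p = 42;			/* parent's value */

	pid = fork();
	assert(pid != -1);
	if (pid == 0) {
		*p = -1;		/* write faults, child gets its own copy */
		_exit(*p == -1 ? 0 : 1);
	}
	waitpid(pid, NULL, 0);
	assert(*p == 42);		/* the parent's page is untouched */
	munmap(p, page_size);
	return 0;
}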
600 * will not copy-on-write if a child process inherits the mapping.
613 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
615 size = npages << self->page_shift; in TEST_F()
620 buffer->fd = -1; in TEST_F()
621 buffer->size = size; in TEST_F()
622 buffer->mirror = malloc(size); in TEST_F()
623 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
625 buffer->ptr = mmap(NULL, size, in TEST_F()
628 buffer->fd, 0); in TEST_F()
629 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
631 /* Initialize buffer->ptr so we can tell if it is written. */ in TEST_F()
632 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
635 /* Initialize data that the device will write to buffer->ptr. */ in TEST_F()
636 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
637 ptr[i] = -i; in TEST_F()
640 if (pid == -1) in TEST_F()
647 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
648 ASSERT_EQ(ptr[i], -i); in TEST_F()
653 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
655 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
656 ASSERT_EQ(ptr[i], -i); in TEST_F()
658 /* The child process needs its own mirror to its own mm. */ in TEST_F()
665 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
666 ASSERT_EQ(buffer->faults, 1); in TEST_F()
669 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
670 ASSERT_EQ(ptr[i], -i); in TEST_F()
695 buffer->fd = -1; in TEST_F()
696 buffer->size = size; in TEST_F()
697 buffer->mirror = malloc(size); in TEST_F()
698 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
700 buffer->ptr = mmap(NULL, size, in TEST_F()
703 buffer->fd, 0); in TEST_F()
704 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
707 npages = size >> self->page_shift; in TEST_F()
708 map = (void *)ALIGN((uintptr_t)buffer->ptr, size); in TEST_F()
711 old_ptr = buffer->ptr; in TEST_F()
712 buffer->ptr = map; in TEST_F()
714 /* Initialize data that the device will write to buffer->ptr. */ in TEST_F()
715 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
719 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages); in TEST_F()
721 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
722 ASSERT_EQ(buffer->faults, 1); in TEST_F()
725 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
728 buffer->ptr = old_ptr; in TEST_F()
733 * Read numeric data from raw and tagged kernel status files. Used to read
734 * /proc and /sys data (without a tag) and from /proc/meminfo (with a tag).
747 return -1; in file_read_ulong()
754 return -1; in file_read_ulong()
758 return -1; in file_read_ulong()
766 return -1; /* looks like the line we want isn't there */ in file_read_ulong()
774 return -1; in file_read_ulong()
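file_read_ulong(), whose error paths show up above, is described as reading numeric data from raw and tagged status files. A hedged sketch of such a reader written to that description; the name, signature, buffer size and base-10 parsing are assumptions rather than the selftest's exact code:

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * Without a tag, parse the first number in the file (a raw /proc or /sys
 * value). With a tag, parse the number that follows it, e.g.
 * read_status_ulong("/proc/meminfo", "Hugepagesize:") returns the huge
 * page size in kB. Returns -1 on any error.
 */
static long read_status_ulong(const char *file, const char *tag)
{
	char buf[2048];
	ssize_t len;
	char *p;
	int fd;

	fd = open(file, O_RDONLY);
	if (fd < 0)
		return -1;
	len = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (len <= 0)
		return -1;
	buf[len] = '\0';

	if (tag) {
		p = strstr(buf, tag);
		if (!p)
			return -1;	/* the line we want isn't there */
		p += strlen(tag);
	} else {
		p = buf;
	}
	return strtol(p, NULL, 10);
}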
799 npages = size >> self->page_shift; in TEST_F()
804 buffer->ptr = mmap(NULL, size, in TEST_F()
807 -1, 0); in TEST_F()
808 if (buffer->ptr == MAP_FAILED) { in TEST_F()
813 buffer->fd = -1; in TEST_F()
814 buffer->size = size; in TEST_F()
815 buffer->mirror = malloc(size); in TEST_F()
816 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
818 /* Initialize data that the device will write to buffer->ptr. */ in TEST_F()
819 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
823 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages); in TEST_F()
825 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
826 ASSERT_EQ(buffer->faults, 1); in TEST_F()
829 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
832 munmap(buffer->ptr, buffer->size); in TEST_F()
833 buffer->ptr = NULL; in TEST_F()
851 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
853 size = npages << self->page_shift; in TEST_F()
861 buffer->fd = fd; in TEST_F()
862 buffer->size = size; in TEST_F()
863 buffer->mirror = malloc(size); in TEST_F()
864 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
867 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
869 len = pwrite(fd, buffer->mirror, size, 0); in TEST_F()
871 memset(buffer->mirror, 0, size); in TEST_F()
873 buffer->ptr = mmap(NULL, size, in TEST_F()
876 buffer->fd, 0); in TEST_F()
877 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
880 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages); in TEST_F()
882 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
883 ASSERT_EQ(buffer->faults, 1); in TEST_F()
886 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
906 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
908 size = npages << self->page_shift; in TEST_F()
916 buffer->fd = fd; in TEST_F()
917 buffer->size = size; in TEST_F()
918 buffer->mirror = malloc(size); in TEST_F()
919 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
921 buffer->ptr = mmap(NULL, size, in TEST_F()
924 buffer->fd, 0); in TEST_F()
925 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
927 /* Initialize data that the device will write to buffer->ptr. */ in TEST_F()
928 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
932 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages); in TEST_F()
934 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
935 ASSERT_EQ(buffer->faults, 1); in TEST_F()
938 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
942 len = pread(fd, buffer->mirror, size, 0); in TEST_F()
944 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
962 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
964 size = npages << self->page_shift; in TEST_F()
969 buffer->fd = -1; in TEST_F()
970 buffer->size = size; in TEST_F()
971 buffer->mirror = malloc(size); in TEST_F()
972 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
974 buffer->ptr = mmap(NULL, size, in TEST_F()
977 buffer->fd, 0); in TEST_F()
978 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
981 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
985 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
987 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
990 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
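hmm_migrate_sys_to_dev(), first used just above (and its dev_to_sys counterpart used further down), is presumably a thin wrapper over the same hmm_dmirror_cmd() helper. A hedged sketch; the HMM_DMIRROR_MIGRATE_TO_DEV/TO_SYS request names are assumed from the dmirror UAPI on kernels that distinguish device-private and device-coherent memory, as the hmm_is_coherent_type() checks in this listing suggest:

/* Move the pages backing buffer->ptr into the fake device's memory. */
static int hmm_migrate_sys_to_dev(int fd, struct hmm_buffer *buffer,
				  unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}

/* Move them back to ordinary system memory. */
static int hmm_migrate_dev_to_sys(int fd, struct hmm_buffer *buffer,
				  unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}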
1010 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1012 size = npages << self->page_shift; in TEST_F()
1017 buffer->fd = -1; in TEST_F()
1018 buffer->size = size; in TEST_F()
1019 buffer->mirror = malloc(size); in TEST_F()
1020 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1022 buffer->ptr = mmap(NULL, size, in TEST_F()
1025 buffer->fd, 0); in TEST_F()
1026 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1029 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1033 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
1035 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1038 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1042 for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i) in TEST_F()
1046 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
1048 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1051 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1066 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1068 size = npages << self->page_shift; in TEST_F()
1073 buffer->fd = -1; in TEST_F()
1074 buffer->size = size; in TEST_F()
1075 buffer->mirror = malloc(size); in TEST_F()
1076 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1078 buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, in TEST_F()
1079 MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0); in TEST_F()
1080 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1083 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1087 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
1089 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1092 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1096 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages); in TEST_F()
1100 for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i) in TEST_F()
1116 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1118 size = npages << self->page_shift; in TEST_F()
1123 buffer->fd = -1; in TEST_F()
1124 buffer->size = size; in TEST_F()
1125 buffer->mirror = malloc(size); in TEST_F()
1126 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1128 buffer->ptr = mmap(NULL, size, in TEST_F()
1131 buffer->fd, 0); in TEST_F()
1132 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1135 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
1136 ASSERT_EQ(ret, -ENOENT); in TEST_F()
1155 size = npages << self->page_shift; in TEST_F()
1160 buffer->fd = -1; in TEST_F()
1161 buffer->size = size; in TEST_F()
1162 buffer->mirror = malloc(size); in TEST_F()
1163 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1166 buffer->ptr = mmap(NULL, size, in TEST_F()
1169 buffer->fd, 0); in TEST_F()
1170 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1171 p = buffer->ptr; in TEST_F()
1174 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages); in TEST_F()
1175 ASSERT_EQ(ret, -EINVAL); in TEST_F()
1178 ret = munmap(buffer->ptr + self->page_size, self->page_size); in TEST_F()
1182 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3); in TEST_F()
1183 ASSERT_EQ(ret, -EINVAL); in TEST_F()
1185 /* Page 2 will be a read-only zero page. */ in TEST_F()
1186 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size, in TEST_F()
1189 ptr = (int *)(buffer->ptr + 2 * self->page_size); in TEST_F()
1193 /* Page 3 will be read-only. */ in TEST_F()
1194 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size, in TEST_F()
1197 ptr = (int *)(buffer->ptr + 3 * self->page_size); in TEST_F()
1199 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size, in TEST_F()
1203 /* Page 4-5 will be read-write. */ in TEST_F()
1204 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size, in TEST_F()
1207 ptr = (int *)(buffer->ptr + 4 * self->page_size); in TEST_F()
1209 ptr = (int *)(buffer->ptr + 5 * self->page_size); in TEST_F()
1212 /* Now try to migrate pages 2-5 to device 1. */ in TEST_F()
1213 buffer->ptr = p + 2 * self->page_size; in TEST_F()
1214 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4); in TEST_F()
1216 ASSERT_EQ(buffer->cpages, 4); in TEST_F()
1219 buffer->ptr = p + 5 * self->page_size; in TEST_F()
1220 ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1); in TEST_F()
1221 ASSERT_EQ(ret, -ENOENT); in TEST_F()
1222 buffer->ptr = p; in TEST_F()
1224 buffer->ptr = p; in TEST_F()
1246 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1248 size = npages << self->page_shift; in TEST_F()
1254 buffer->fd = -1; in TEST_F()
1255 buffer->size = size; in TEST_F()
1256 buffer->mirror = malloc(size); in TEST_F()
1257 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1259 buffer->ptr = mmap(NULL, size, in TEST_F()
1262 buffer->fd, 0); in TEST_F()
1263 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1266 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1270 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
1272 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1275 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1279 if (hmm_is_coherent_type(variant->device_number)) { in TEST_F()
1280 ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages); in TEST_F()
1282 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1285 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1305 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1307 size = npages << self->page_shift; in TEST_F()
1313 buffer->fd = -1; in TEST_F()
1314 buffer->size = size; in TEST_F()
1315 buffer->mirror = malloc(size); in TEST_F()
1316 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1318 buffer->ptr = mmap(NULL, size, in TEST_F()
1321 buffer->fd, 0); in TEST_F()
1322 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1325 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1329 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, in TEST_F()
1332 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1333 ASSERT_EQ(buffer->faults, 1); in TEST_F()
1336 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1349 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2); in unmap_buffer()
1350 buffer->ptr = NULL; in unmap_buffer()
1365 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1367 size = npages << self->page_shift; in TEST_F()
1379 buffer->fd = -1; in TEST_F()
1380 buffer->size = size; in TEST_F()
1381 buffer->mirror = malloc(size); in TEST_F()
1382 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1384 buffer->ptr = mmap(NULL, size, in TEST_F()
1387 buffer->fd, 0); in TEST_F()
1388 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1391 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1398 rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, in TEST_F()
1401 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1402 ASSERT_EQ(buffer->faults, 1); in TEST_F()
1405 for (i = 0, ptr = buffer->mirror; in TEST_F()
1428 size = npages << self->page_shift; in TEST_F()
1433 buffer->fd = -1; in TEST_F()
1434 buffer->size = size; in TEST_F()
1435 buffer->mirror = malloc(npages); in TEST_F()
1436 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1440 buffer->ptr = mmap(NULL, size, in TEST_F()
1443 self->fd, 0); in TEST_F()
1444 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1447 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); in TEST_F()
1449 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1452 m = buffer->mirror; in TEST_F()
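Note that the snapshot tests allocate the mirror as malloc(npages) rather than malloc(size): after HMM_DMIRROR_SNAPSHOT the mirror holds one flag byte per page instead of page contents. A small hypothetical helper showing how such a per-page flag array could be checked; the HMM_DMIRROR_PROT_* names come from the dmirror UAPI, and which flag a given test expects (for example HMM_DMIRROR_PROT_WRITE for freshly written anonymous pages) is inferred, not quoted:

#include "test_hmm_uapi.h"	/* HMM_DMIRROR_PROT_* flag values */

/*
 * After a snapshot, mirror[i] describes page i (none, read, write, zero
 * page, device-private, ...). Return 1 if all npages carry the expected
 * flags, 0 otherwise.
 */
static int snapshot_all_match(const unsigned char *mirror,
			      unsigned long npages, unsigned char expected)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		if (mirror[i] != expected)
			return 0;
	return 1;
}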
1473 size = npages << self->page_shift; in TEST_F()
1478 buffer->fd = -1; in TEST_F()
1479 buffer->size = size; in TEST_F()
1480 buffer->mirror = malloc(npages); in TEST_F()
1481 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1484 buffer->ptr = mmap(NULL, size, in TEST_F()
1487 buffer->fd, 0); in TEST_F()
1488 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1489 p = buffer->ptr; in TEST_F()
1492 ret = munmap(buffer->ptr + self->page_size, self->page_size); in TEST_F()
1495 /* Page 2 will be read-only zero page. */ in TEST_F()
1496 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size, in TEST_F()
1499 ptr = (int *)(buffer->ptr + 2 * self->page_size); in TEST_F()
1503 /* Page 3 will be read-only. */ in TEST_F()
1504 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size, in TEST_F()
1507 ptr = (int *)(buffer->ptr + 3 * self->page_size); in TEST_F()
1509 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size, in TEST_F()
1513 /* Page 4-6 will be read-write. */ in TEST_F()
1514 ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size, in TEST_F()
1517 ptr = (int *)(buffer->ptr + 4 * self->page_size); in TEST_F()
1521 buffer->ptr = p + 5 * self->page_size; in TEST_F()
1522 ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1); in TEST_F()
1524 ASSERT_EQ(buffer->cpages, 1); in TEST_F()
1527 buffer->ptr = p + 6 * self->page_size; in TEST_F()
1528 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1); in TEST_F()
1530 ASSERT_EQ(buffer->cpages, 1); in TEST_F()
1533 buffer->ptr = p; in TEST_F()
1534 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages); in TEST_F()
1536 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1539 m = buffer->mirror; in TEST_F()
1545 if (!hmm_is_coherent_type(variant->device_number0)) { in TEST_F()
1582 npages = size >> self->page_shift; in TEST_F()
1587 buffer->ptr = mmap(NULL, size, in TEST_F()
1590 -1, 0); in TEST_F()
1591 if (buffer->ptr == MAP_FAILED) { in TEST_F()
1596 buffer->size = size; in TEST_F()
1597 buffer->mirror = malloc(npages); in TEST_F()
1598 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1600 /* Initialize the pages the device will snapshot in buffer->ptr. */ in TEST_F()
1601 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1605 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); in TEST_F()
1607 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1610 m = buffer->mirror; in TEST_F()
1615 /* Make the region read-only. */ in TEST_F()
1616 ret = mprotect(buffer->ptr, size, PROT_READ); in TEST_F()
1620 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); in TEST_F()
1622 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1625 m = buffer->mirror; in TEST_F()
1630 munmap(buffer->ptr, buffer->size); in TEST_F()
1631 buffer->ptr = NULL; in TEST_F()
1648 size = npages << self->page_shift; in TEST_F()
1653 buffer->fd = -1; in TEST_F()
1654 buffer->size = size; in TEST_F()
1655 buffer->mirror = malloc(npages); in TEST_F()
1656 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1659 buffer->ptr = mmap(NULL, size, in TEST_F()
1662 buffer->fd, 0); in TEST_F()
1663 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1666 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1669 /* Make region read-only. */ in TEST_F()
1670 ret = mprotect(buffer->ptr, size, PROT_READ); in TEST_F()
1674 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages); in TEST_F()
1676 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1677 ASSERT_EQ(buffer->faults, 1); in TEST_F()
1680 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1684 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages); in TEST_F()
1686 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1687 ASSERT_EQ(buffer->faults, 1); in TEST_F()
1690 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1694 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages); in TEST_F()
1696 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1698 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages); in TEST_F()
1700 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1701 ASSERT_EQ(buffer->faults, 1); in TEST_F()
1704 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1722 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1724 size = npages << self->page_shift; in TEST_F()
1729 buffer->fd = -1; in TEST_F()
1730 buffer->size = size; in TEST_F()
1731 buffer->mirror = malloc(size); in TEST_F()
1732 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1734 buffer->ptr = mmap(NULL, size, in TEST_F()
1737 buffer->fd, 0); in TEST_F()
1738 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1741 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1745 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages); in TEST_F()
1747 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1750 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1754 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1757 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1761 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages); in TEST_F()
1776 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1778 size = npages << self->page_shift; in TEST_F()
1783 buffer->fd = -1; in TEST_F()
1784 buffer->size = size; in TEST_F()
1785 buffer->mirror = malloc(size); in TEST_F()
1786 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1788 buffer->ptr = mmap(NULL, size, in TEST_F()
1791 buffer->fd, 0); in TEST_F()
1792 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1795 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1799 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages); in TEST_F()
1801 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1804 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1807 ret = mprotect(buffer->ptr, size, PROT_READ); in TEST_F()
1811 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages); in TEST_F()
1812 ASSERT_EQ(ret, -EPERM); in TEST_F()
1818 * Check copy-on-write works.
1829 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
1831 size = npages << self->page_shift; in TEST_F()
1836 buffer->fd = -1; in TEST_F()
1837 buffer->size = size; in TEST_F()
1838 buffer->mirror = malloc(size); in TEST_F()
1839 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1841 buffer->ptr = mmap(NULL, size, in TEST_F()
1844 buffer->fd, 0); in TEST_F()
1845 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1848 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1852 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages); in TEST_F()
1854 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1859 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1862 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1905 if (gup_fd == -1) in TEST_F()
1909 size = npages << self->page_shift; in TEST_F()
1914 buffer->fd = -1; in TEST_F()
1915 buffer->size = size; in TEST_F()
1916 buffer->mirror = malloc(size); in TEST_F()
1917 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
1919 buffer->ptr = mmap(NULL, size, in TEST_F()
1922 buffer->fd, 0); in TEST_F()
1923 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
1926 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1930 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
1932 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1934 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) in TEST_F()
1938 (unsigned long)buffer->ptr, in TEST_F()
1939 GUP_BASIC_TEST, 1, self->page_size, 0), 0); in TEST_F()
1941 (unsigned long)buffer->ptr + 1 * self->page_size, in TEST_F()
1942 GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0); in TEST_F()
1944 (unsigned long)buffer->ptr + 2 * self->page_size, in TEST_F()
1945 PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0); in TEST_F()
1947 (unsigned long)buffer->ptr + 3 * self->page_size, in TEST_F()
1948 PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0); in TEST_F()
1951 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); in TEST_F()
1953 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
1954 m = buffer->mirror; in TEST_F()
1955 if (hmm_is_coherent_type(variant->device_number)) { in TEST_F()
1966 * corrupted data. in TEST_F()
1968 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
1976 * Test copy-on-write in device pages.
1995 size = npages << self->page_shift; in TEST_F()
2000 buffer->fd = -1; in TEST_F()
2001 buffer->size = size; in TEST_F()
2002 buffer->mirror = malloc(size); in TEST_F()
2003 ASSERT_NE(buffer->mirror, NULL); in TEST_F()
2005 buffer->ptr = mmap(NULL, size, in TEST_F()
2008 buffer->fd, 0); in TEST_F()
2009 ASSERT_NE(buffer->ptr, MAP_FAILED); in TEST_F()
2012 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
2017 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); in TEST_F()
2019 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
2022 if (pid == -1) in TEST_F()
2035 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) in TEST_F()
2045 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); in TEST_F()
2047 ASSERT_EQ(buffer->cpages, npages); in TEST_F()
2048 m = buffer->mirror; in TEST_F()