Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools: update hmm-test to support device coherent type

Test cases such as migrate_fault and migrate_multiple were modified to
explicitly migrate from device to system memory without the need for page
faults, when using device coherent type.

Snapshot test case updated to read memory device type first and, based on
that, get the proper returned results. The migrate_ping_pong test case was
added to test explicit migration from device to system memory for both
private and coherent zone types.

Helpers to migrate from device to system memory and vice versa were also
added.

Link: https://lkml.kernel.org/r/20220715150521.18165-12-alex.sierra@amd.com
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Alex Sierra and committed by
akpm
f70dab3c 4c2e0f76

+100 -21
+100 -21
tools/testing/selftests/vm/hmm-tests.c
··· 46 46 uint64_t faults; 47 47 }; 48 48 49 + enum { 50 + HMM_PRIVATE_DEVICE_ONE, 51 + HMM_PRIVATE_DEVICE_TWO, 52 + HMM_COHERENCE_DEVICE_ONE, 53 + HMM_COHERENCE_DEVICE_TWO, 54 + }; 55 + 49 56 #define TWOMEG (1 << 21) 50 57 #define HMM_BUFFER_SIZE (1024 << 12) 51 58 #define HMM_PATH_MAX 64 ··· 67 60 unsigned int page_shift; 68 61 }; 69 62 63 + FIXTURE_VARIANT(hmm) 64 + { 65 + int device_number; 66 + }; 67 + 68 + FIXTURE_VARIANT_ADD(hmm, hmm_device_private) 69 + { 70 + .device_number = HMM_PRIVATE_DEVICE_ONE, 71 + }; 72 + 73 + FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent) 74 + { 75 + .device_number = HMM_COHERENCE_DEVICE_ONE, 76 + }; 77 + 70 78 FIXTURE(hmm2) 71 79 { 72 80 int fd0; 73 81 int fd1; 74 82 unsigned int page_size; 75 83 unsigned int page_shift; 84 + }; 85 + 86 + FIXTURE_VARIANT(hmm2) 87 + { 88 + int device_number0; 89 + int device_number1; 90 + }; 91 + 92 + FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private) 93 + { 94 + .device_number0 = HMM_PRIVATE_DEVICE_ONE, 95 + .device_number1 = HMM_PRIVATE_DEVICE_TWO, 96 + }; 97 + 98 + FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent) 99 + { 100 + .device_number0 = HMM_COHERENCE_DEVICE_ONE, 101 + .device_number1 = HMM_COHERENCE_DEVICE_TWO, 76 102 }; 77 103 78 104 static int hmm_open(int unit) ··· 121 81 return fd; 122 82 } 123 83 84 + static bool hmm_is_coherent_type(int dev_num) 85 + { 86 + return (dev_num >= HMM_COHERENCE_DEVICE_ONE); 87 + } 88 + 124 89 FIXTURE_SETUP(hmm) 125 90 { 126 91 self->page_size = sysconf(_SC_PAGE_SIZE); 127 92 self->page_shift = ffs(self->page_size) - 1; 128 93 129 - self->fd = hmm_open(0); 94 + self->fd = hmm_open(variant->device_number); 95 + if (self->fd < 0 && hmm_is_coherent_type(variant->device_number)) 96 + SKIP(exit(0), "DEVICE_COHERENT not available"); 130 97 ASSERT_GE(self->fd, 0); 131 98 } 132 99 ··· 142 95 self->page_size = sysconf(_SC_PAGE_SIZE); 143 96 self->page_shift = ffs(self->page_size) - 1; 144 97 145 - self->fd0 = hmm_open(0); 98 + self->fd0 = 
hmm_open(variant->device_number0); 99 + if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0)) 100 + SKIP(exit(0), "DEVICE_COHERENT not available"); 146 101 ASSERT_GE(self->fd0, 0); 147 - self->fd1 = hmm_open(1); 102 + self->fd1 = hmm_open(variant->device_number1); 148 103 ASSERT_GE(self->fd1, 0); 149 104 } 150 105 ··· 258 209 t.tv_sec = 0; 259 210 t.tv_nsec = n; 260 211 nanosleep(&t, NULL); 212 + } 213 + 214 + static int hmm_migrate_sys_to_dev(int fd, 215 + struct hmm_buffer *buffer, 216 + unsigned long npages) 217 + { 218 + return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages); 219 + } 220 + 221 + static int hmm_migrate_dev_to_sys(int fd, 222 + struct hmm_buffer *buffer, 223 + unsigned long npages) 224 + { 225 + return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages); 261 226 } 262 227 263 228 /* ··· 938 875 ptr[i] = i; 939 876 940 877 /* Migrate memory to device. */ 941 - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); 878 + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); 942 879 ASSERT_EQ(ret, 0); 943 880 ASSERT_EQ(buffer->cpages, npages); 944 881 ··· 986 923 ptr[i] = i; 987 924 988 925 /* Migrate memory to device. */ 989 - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); 926 + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); 990 927 ASSERT_EQ(ret, 0); 991 928 ASSERT_EQ(buffer->cpages, npages); 992 929 ··· 999 936 ASSERT_EQ(ptr[i], i); 1000 937 1001 938 /* Migrate memory to the device again. */ 1002 - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); 939 + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); 1003 940 ASSERT_EQ(ret, 0); 1004 941 ASSERT_EQ(buffer->cpages, npages); 1005 942 ··· 1039 976 ASSERT_NE(buffer->ptr, MAP_FAILED); 1040 977 1041 978 /* Migrate memory to device. 
*/ 1042 - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages); 979 + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); 1043 980 ASSERT_EQ(ret, -ENOENT); 1044 981 1045 982 hmm_buffer_free(buffer); ··· 1078 1015 p = buffer->ptr; 1079 1016 1080 1017 /* Migrating a protected area should be an error. */ 1081 - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages); 1018 + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages); 1082 1019 ASSERT_EQ(ret, -EINVAL); 1083 1020 1084 1021 /* Punch a hole after the first page address. */ ··· 1086 1023 ASSERT_EQ(ret, 0); 1087 1024 1088 1025 /* We expect an error if the vma doesn't cover the range. */ 1089 - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3); 1026 + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3); 1090 1027 ASSERT_EQ(ret, -EINVAL); 1091 1028 1092 1029 /* Page 2 will be a read-only zero page. */ ··· 1118 1055 1119 1056 /* Now try to migrate pages 2-5 to device 1. */ 1120 1057 buffer->ptr = p + 2 * self->page_size; 1121 - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4); 1058 + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4); 1122 1059 ASSERT_EQ(ret, 0); 1123 1060 ASSERT_EQ(buffer->cpages, 4); 1124 1061 1125 1062 /* Page 5 won't be migrated to device 0 because it's on device 1. */ 1126 1063 buffer->ptr = p + 5 * self->page_size; 1127 - ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1); 1064 + ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1); 1128 1065 ASSERT_EQ(ret, -ENOENT); 1129 1066 buffer->ptr = p; 1130 1067 ··· 1133 1070 } 1134 1071 1135 1072 /* 1136 - * Migrate anonymous memory to device private memory and fault it back to system 1137 - * memory multiple times. 1073 + * Migrate anonymous memory to device memory and back to system memory 1074 + * multiple times. In case of private zone configuration, this is done 1075 + * through fault pages accessed by CPU. 
In case of coherent zone configuration, 1076 + * the pages from the device should be explicitly migrated back to system memory. 1077 + * The reason is Coherent device zone has coherent access by CPU, therefore 1078 + * it will not generate any page fault. 1138 1079 */ 1139 1080 TEST_F(hmm, migrate_multiple) 1140 1081 { ··· 1174 1107 ptr[i] = i; 1175 1108 1176 1109 /* Migrate memory to device. */ 1177 - ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, 1178 - npages); 1110 + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); 1179 1111 ASSERT_EQ(ret, 0); 1180 1112 ASSERT_EQ(buffer->cpages, npages); 1181 1113 ··· 1182 1116 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) 1183 1117 ASSERT_EQ(ptr[i], i); 1184 1118 1185 - /* Fault pages back to system memory and check them. */ 1119 + /* Migrate back to system memory and check them. */ 1120 + if (hmm_is_coherent_type(variant->device_number)) { 1121 + ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages); 1122 + ASSERT_EQ(ret, 0); 1123 + ASSERT_EQ(buffer->cpages, npages); 1124 + } 1125 + 1186 1126 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) 1187 1127 ASSERT_EQ(ptr[i], i); 1188 1128 ··· 1426 1354 1427 1355 /* Page 5 will be migrated to device 0. */ 1428 1356 buffer->ptr = p + 5 * self->page_size; 1429 - ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1); 1357 + ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1); 1430 1358 ASSERT_EQ(ret, 0); 1431 1359 ASSERT_EQ(buffer->cpages, 1); 1432 1360 1433 1361 /* Page 6 will be migrated to device 1. 
*/ 1434 1362 buffer->ptr = p + 6 * self->page_size; 1435 - ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1); 1363 + ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1); 1436 1364 ASSERT_EQ(ret, 0); 1437 1365 ASSERT_EQ(buffer->cpages, 1); 1438 1366 ··· 1449 1377 ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ); 1450 1378 ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ); 1451 1379 ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE); 1452 - ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL | 1453 - HMM_DMIRROR_PROT_WRITE); 1454 - ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE); 1380 + if (!hmm_is_coherent_type(variant->device_number0)) { 1381 + ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL | 1382 + HMM_DMIRROR_PROT_WRITE); 1383 + ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE); 1384 + } else { 1385 + ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | 1386 + HMM_DMIRROR_PROT_WRITE); 1387 + ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE | 1388 + HMM_DMIRROR_PROT_WRITE); 1389 + } 1455 1390 1456 1391 hmm_buffer_free(buffer); 1457 1392 }