Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/powerpc: Add ptrace tests for VSX, VMX registers

This patch adds a ptrace interface test for the VSX and VMX registers.
It also adds ptrace-based helper functions for accessing the VSX and
VMX registers, along with assembly helper functions for loading and
storing them.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Anshuman Khandual; committed by Michael Ellerman.
0da535c0 01f7fdc7

+630 -1
+265
tools/testing/selftests/powerpc/lib/reg.S
··· 130 130 stfs 31, 31*4(3) 131 131 blr 132 132 FUNC_END(store_fpr_single_precision) 133 + 134 + /* VMX/VSX registers - unsigned long buf[128] */ 135 + FUNC_START(loadvsx) 136 + lis 4, 0 137 + LXVD2X (0,(4),(3)) 138 + addi 4, 4, 16 139 + LXVD2X (1,(4),(3)) 140 + addi 4, 4, 16 141 + LXVD2X (2,(4),(3)) 142 + addi 4, 4, 16 143 + LXVD2X (3,(4),(3)) 144 + addi 4, 4, 16 145 + LXVD2X (4,(4),(3)) 146 + addi 4, 4, 16 147 + LXVD2X (5,(4),(3)) 148 + addi 4, 4, 16 149 + LXVD2X (6,(4),(3)) 150 + addi 4, 4, 16 151 + LXVD2X (7,(4),(3)) 152 + addi 4, 4, 16 153 + LXVD2X (8,(4),(3)) 154 + addi 4, 4, 16 155 + LXVD2X (9,(4),(3)) 156 + addi 4, 4, 16 157 + LXVD2X (10,(4),(3)) 158 + addi 4, 4, 16 159 + LXVD2X (11,(4),(3)) 160 + addi 4, 4, 16 161 + LXVD2X (12,(4),(3)) 162 + addi 4, 4, 16 163 + LXVD2X (13,(4),(3)) 164 + addi 4, 4, 16 165 + LXVD2X (14,(4),(3)) 166 + addi 4, 4, 16 167 + LXVD2X (15,(4),(3)) 168 + addi 4, 4, 16 169 + LXVD2X (16,(4),(3)) 170 + addi 4, 4, 16 171 + LXVD2X (17,(4),(3)) 172 + addi 4, 4, 16 173 + LXVD2X (18,(4),(3)) 174 + addi 4, 4, 16 175 + LXVD2X (19,(4),(3)) 176 + addi 4, 4, 16 177 + LXVD2X (20,(4),(3)) 178 + addi 4, 4, 16 179 + LXVD2X (21,(4),(3)) 180 + addi 4, 4, 16 181 + LXVD2X (22,(4),(3)) 182 + addi 4, 4, 16 183 + LXVD2X (23,(4),(3)) 184 + addi 4, 4, 16 185 + LXVD2X (24,(4),(3)) 186 + addi 4, 4, 16 187 + LXVD2X (25,(4),(3)) 188 + addi 4, 4, 16 189 + LXVD2X (26,(4),(3)) 190 + addi 4, 4, 16 191 + LXVD2X (27,(4),(3)) 192 + addi 4, 4, 16 193 + LXVD2X (28,(4),(3)) 194 + addi 4, 4, 16 195 + LXVD2X (29,(4),(3)) 196 + addi 4, 4, 16 197 + LXVD2X (30,(4),(3)) 198 + addi 4, 4, 16 199 + LXVD2X (31,(4),(3)) 200 + addi 4, 4, 16 201 + LXVD2X (32,(4),(3)) 202 + addi 4, 4, 16 203 + LXVD2X (33,(4),(3)) 204 + addi 4, 4, 16 205 + LXVD2X (34,(4),(3)) 206 + addi 4, 4, 16 207 + LXVD2X (35,(4),(3)) 208 + addi 4, 4, 16 209 + LXVD2X (36,(4),(3)) 210 + addi 4, 4, 16 211 + LXVD2X (37,(4),(3)) 212 + addi 4, 4, 16 213 + LXVD2X (38,(4),(3)) 214 + addi 4, 4, 16 215 + LXVD2X (39,(4),(3)) 
216 + addi 4, 4, 16 217 + LXVD2X (40,(4),(3)) 218 + addi 4, 4, 16 219 + LXVD2X (41,(4),(3)) 220 + addi 4, 4, 16 221 + LXVD2X (42,(4),(3)) 222 + addi 4, 4, 16 223 + LXVD2X (43,(4),(3)) 224 + addi 4, 4, 16 225 + LXVD2X (44,(4),(3)) 226 + addi 4, 4, 16 227 + LXVD2X (45,(4),(3)) 228 + addi 4, 4, 16 229 + LXVD2X (46,(4),(3)) 230 + addi 4, 4, 16 231 + LXVD2X (47,(4),(3)) 232 + addi 4, 4, 16 233 + LXVD2X (48,(4),(3)) 234 + addi 4, 4, 16 235 + LXVD2X (49,(4),(3)) 236 + addi 4, 4, 16 237 + LXVD2X (50,(4),(3)) 238 + addi 4, 4, 16 239 + LXVD2X (51,(4),(3)) 240 + addi 4, 4, 16 241 + LXVD2X (52,(4),(3)) 242 + addi 4, 4, 16 243 + LXVD2X (53,(4),(3)) 244 + addi 4, 4, 16 245 + LXVD2X (54,(4),(3)) 246 + addi 4, 4, 16 247 + LXVD2X (55,(4),(3)) 248 + addi 4, 4, 16 249 + LXVD2X (56,(4),(3)) 250 + addi 4, 4, 16 251 + LXVD2X (57,(4),(3)) 252 + addi 4, 4, 16 253 + LXVD2X (58,(4),(3)) 254 + addi 4, 4, 16 255 + LXVD2X (59,(4),(3)) 256 + addi 4, 4, 16 257 + LXVD2X (60,(4),(3)) 258 + addi 4, 4, 16 259 + LXVD2X (61,(4),(3)) 260 + addi 4, 4, 16 261 + LXVD2X (62,(4),(3)) 262 + addi 4, 4, 16 263 + LXVD2X (63,(4),(3)) 264 + blr 265 + FUNC_END(loadvsx) 266 + 267 + FUNC_START(storevsx) 268 + lis 4, 0 269 + STXVD2X (0,(4),(3)) 270 + addi 4, 4, 16 271 + STXVD2X (1,(4),(3)) 272 + addi 4, 4, 16 273 + STXVD2X (2,(4),(3)) 274 + addi 4, 4, 16 275 + STXVD2X (3,(4),(3)) 276 + addi 4, 4, 16 277 + STXVD2X (4,(4),(3)) 278 + addi 4, 4, 16 279 + STXVD2X (5,(4),(3)) 280 + addi 4, 4, 16 281 + STXVD2X (6,(4),(3)) 282 + addi 4, 4, 16 283 + STXVD2X (7,(4),(3)) 284 + addi 4, 4, 16 285 + STXVD2X (8,(4),(3)) 286 + addi 4, 4, 16 287 + STXVD2X (9,(4),(3)) 288 + addi 4, 4, 16 289 + STXVD2X (10,(4),(3)) 290 + addi 4, 4, 16 291 + STXVD2X (11,(4),(3)) 292 + addi 4, 4, 16 293 + STXVD2X (12,(4),(3)) 294 + addi 4, 4, 16 295 + STXVD2X (13,(4),(3)) 296 + addi 4, 4, 16 297 + STXVD2X (14,(4),(3)) 298 + addi 4, 4, 16 299 + STXVD2X (15,(4),(3)) 300 + addi 4, 4, 16 301 + STXVD2X (16,(4),(3)) 302 + addi 4, 4, 16 303 + STXVD2X 
(17,(4),(3)) 304 + addi 4, 4, 16 305 + STXVD2X (18,(4),(3)) 306 + addi 4, 4, 16 307 + STXVD2X (19,(4),(3)) 308 + addi 4, 4, 16 309 + STXVD2X (20,(4),(3)) 310 + addi 4, 4, 16 311 + STXVD2X (21,(4),(3)) 312 + addi 4, 4, 16 313 + STXVD2X (22,(4),(3)) 314 + addi 4, 4, 16 315 + STXVD2X (23,(4),(3)) 316 + addi 4, 4, 16 317 + STXVD2X (24,(4),(3)) 318 + addi 4, 4, 16 319 + STXVD2X (25,(4),(3)) 320 + addi 4, 4, 16 321 + STXVD2X (26,(4),(3)) 322 + addi 4, 4, 16 323 + STXVD2X (27,(4),(3)) 324 + addi 4, 4, 16 325 + STXVD2X (28,(4),(3)) 326 + addi 4, 4, 16 327 + STXVD2X (29,(4),(3)) 328 + addi 4, 4, 16 329 + STXVD2X (30,(4),(3)) 330 + addi 4, 4, 16 331 + STXVD2X (31,(4),(3)) 332 + addi 4, 4, 16 333 + STXVD2X (32,(4),(3)) 334 + addi 4, 4, 16 335 + STXVD2X (33,(4),(3)) 336 + addi 4, 4, 16 337 + STXVD2X (34,(4),(3)) 338 + addi 4, 4, 16 339 + STXVD2X (35,(4),(3)) 340 + addi 4, 4, 16 341 + STXVD2X (36,(4),(3)) 342 + addi 4, 4, 16 343 + STXVD2X (37,(4),(3)) 344 + addi 4, 4, 16 345 + STXVD2X (38,(4),(3)) 346 + addi 4, 4, 16 347 + STXVD2X (39,(4),(3)) 348 + addi 4, 4, 16 349 + STXVD2X (40,(4),(3)) 350 + addi 4, 4, 16 351 + STXVD2X (41,(4),(3)) 352 + addi 4, 4, 16 353 + STXVD2X (42,(4),(3)) 354 + addi 4, 4, 16 355 + STXVD2X (43,(4),(3)) 356 + addi 4, 4, 16 357 + STXVD2X (44,(4),(3)) 358 + addi 4, 4, 16 359 + STXVD2X (45,(4),(3)) 360 + addi 4, 4, 16 361 + STXVD2X (46,(4),(3)) 362 + addi 4, 4, 16 363 + STXVD2X (47,(4),(3)) 364 + addi 4, 4, 16 365 + STXVD2X (48,(4),(3)) 366 + addi 4, 4, 16 367 + STXVD2X (49,(4),(3)) 368 + addi 4, 4, 16 369 + STXVD2X (50,(4),(3)) 370 + addi 4, 4, 16 371 + STXVD2X (51,(4),(3)) 372 + addi 4, 4, 16 373 + STXVD2X (52,(4),(3)) 374 + addi 4, 4, 16 375 + STXVD2X (53,(4),(3)) 376 + addi 4, 4, 16 377 + STXVD2X (54,(4),(3)) 378 + addi 4, 4, 16 379 + STXVD2X (55,(4),(3)) 380 + addi 4, 4, 16 381 + STXVD2X (56,(4),(3)) 382 + addi 4, 4, 16 383 + STXVD2X (57,(4),(3)) 384 + addi 4, 4, 16 385 + STXVD2X (58,(4),(3)) 386 + addi 4, 4, 16 387 + STXVD2X (59,(4),(3)) 388 + addi 
4, 4, 16 389 + STXVD2X (60,(4),(3)) 390 + addi 4, 4, 16 391 + STXVD2X (61,(4),(3)) 392 + addi 4, 4, 16 393 + STXVD2X (62,(4),(3)) 394 + addi 4, 4, 16 395 + STXVD2X (63,(4),(3)) 396 + blr 397 + FUNC_END(storevsx)
+1
tools/testing/selftests/powerpc/ptrace/.gitignore
··· 4 4 ptrace-tar 5 5 ptrace-tm-tar 6 6 ptrace-tm-spd-tar 7 + ptrace-vsx
+1 -1
tools/testing/selftests/powerpc/ptrace/Makefile
··· 1 1 TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ 2 - ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar 2 + ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx 3 3 4 4 include ../../lib.mk 5 5
+117
tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
··· 1 + /* 2 + * Ptrace test for VMX/VSX registers 3 + * 4 + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the License, or (at your option) any later version. 10 + */ 11 + #include "ptrace.h" 12 + #include "ptrace-vsx.h" 13 + 14 + /* Tracer and Tracee Shared Data */ 15 + int shm_id; 16 + int *cptr, *pptr; 17 + 18 + unsigned long fp_load[VEC_MAX]; 19 + unsigned long fp_load_new[VEC_MAX]; 20 + unsigned long fp_store[VEC_MAX]; 21 + 22 + void vsx(void) 23 + { 24 + int ret; 25 + 26 + cptr = (int *)shmat(shm_id, NULL, 0); 27 + loadvsx(fp_load, 0); 28 + cptr[1] = 1; 29 + 30 + while (!cptr[0]) 31 + asm volatile("" : : : "memory"); 32 + shmdt((void *) cptr); 33 + 34 + storevsx(fp_store, 0); 35 + ret = compare_vsx_vmx(fp_store, fp_load_new); 36 + if (ret) 37 + exit(1); 38 + exit(0); 39 + } 40 + 41 + int trace_vsx(pid_t child) 42 + { 43 + unsigned long vsx[VSX_MAX]; 44 + unsigned long vmx[VMX_MAX + 2][2]; 45 + 46 + FAIL_IF(start_trace(child)); 47 + FAIL_IF(show_vsx(child, vsx)); 48 + FAIL_IF(validate_vsx(vsx, fp_load)); 49 + FAIL_IF(show_vmx(child, vmx)); 50 + FAIL_IF(validate_vmx(vmx, fp_load)); 51 + 52 + memset(vsx, 0, sizeof(vsx)); 53 + memset(vmx, 0, sizeof(vmx)); 54 + load_vsx_vmx(fp_load_new, vsx, vmx); 55 + 56 + FAIL_IF(write_vsx(child, vsx)); 57 + FAIL_IF(write_vmx(child, vmx)); 58 + FAIL_IF(stop_trace(child)); 59 + 60 + return TEST_PASS; 61 + } 62 + 63 + int ptrace_vsx(void) 64 + { 65 + pid_t pid; 66 + int ret, status, i; 67 + 68 + shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT); 69 + 70 + for (i = 0; i < VEC_MAX; i++) 71 + fp_load[i] = i + rand(); 72 + 73 + for (i = 0; i < VEC_MAX; i++) 74 + fp_load_new[i] = i + 2 * rand(); 75 + 76 + pid = fork(); 77 + if (pid < 0) { 78 + perror("fork() failed"); 79 + return TEST_FAIL; 80 + } 
81 + 82 + if (pid == 0) 83 + vsx(); 84 + 85 + if (pid) { 86 + pptr = (int *)shmat(shm_id, NULL, 0); 87 + while (!pptr[1]) 88 + asm volatile("" : : : "memory"); 89 + 90 + ret = trace_vsx(pid); 91 + if (ret) { 92 + kill(pid, SIGTERM); 93 + shmdt((void *)pptr); 94 + shmctl(shm_id, IPC_RMID, NULL); 95 + return TEST_FAIL; 96 + } 97 + 98 + pptr[0] = 1; 99 + shmdt((void *)pptr); 100 + 101 + ret = wait(&status); 102 + shmctl(shm_id, IPC_RMID, NULL); 103 + if (ret != pid) { 104 + printf("Child's exit status not captured\n"); 105 + return TEST_FAIL; 106 + } 107 + 108 + return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL : 109 + TEST_PASS; 110 + } 111 + return TEST_PASS; 112 + } 113 + 114 + int main(int argc, char *argv[]) 115 + { 116 + return test_harness(ptrace_vsx, "ptrace_vsx"); 117 + }
+127
tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
··· 1 + /* 2 + * Copyright (C) 2015 Anshuman Khandual, IBM Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation; either version 7 + * 2 of the License, or (at your option) any later version. 8 + */ 9 + #define VEC_MAX 128 10 + #define VSX_MAX 32 11 + #define VMX_MAX 32 12 + 13 + /* 14 + * unsigned long vsx[32] 15 + * unsigned long load[128] 16 + */ 17 + int validate_vsx(unsigned long *vsx, unsigned long *load) 18 + { 19 + int i; 20 + 21 + for (i = 0; i < VSX_MAX; i++) { 22 + if (vsx[i] != load[2 * i + 1]) { 23 + printf("vsx[%d]: %lx load[%d] %lx\n", 24 + i, vsx[i], 2 * i + 1, load[2 * i + 1]); 25 + return TEST_FAIL; 26 + } 27 + } 28 + return TEST_PASS; 29 + } 30 + 31 + /* 32 + * unsigned long vmx[32][2] 33 + * unsigned long load[128] 34 + */ 35 + int validate_vmx(unsigned long vmx[][2], unsigned long *load) 36 + { 37 + int i; 38 + 39 + for (i = 0; i < VMX_MAX; i++) { 40 + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 41 + if ((vmx[i][0] != load[64 + 2 * i]) || 42 + (vmx[i][1] != load[65 + 2 * i])) { 43 + printf("vmx[%d][0]: %lx load[%d] %lx\n", 44 + i, vmx[i][0], 64 + 2 * i, 45 + load[64 + 2 * i]); 46 + printf("vmx[%d][1]: %lx load[%d] %lx\n", 47 + i, vmx[i][1], 65 + 2 * i, 48 + load[65 + 2 * i]); 49 + return TEST_FAIL; 50 + } 51 + #else /* 52 + * In LE each value pair is stored in an 53 + * alternate manner. 
54 + */ 55 + if ((vmx[i][0] != load[65 + 2 * i]) || 56 + (vmx[i][1] != load[64 + 2 * i])) { 57 + printf("vmx[%d][0]: %lx load[%d] %lx\n", 58 + i, vmx[i][0], 65 + 2 * i, 59 + load[65 + 2 * i]); 60 + printf("vmx[%d][1]: %lx load[%d] %lx\n", 61 + i, vmx[i][1], 64 + 2 * i, 62 + load[64 + 2 * i]); 63 + return TEST_FAIL; 64 + } 65 + #endif 66 + } 67 + return TEST_PASS; 68 + } 69 + 70 + /* 71 + * unsigned long store[128] 72 + * unsigned long load[128] 73 + */ 74 + int compare_vsx_vmx(unsigned long *store, unsigned long *load) 75 + { 76 + int i; 77 + 78 + for (i = 0; i < VSX_MAX; i++) { 79 + if (store[1 + 2 * i] != load[1 + 2 * i]) { 80 + printf("store[%d]: %lx load[%d] %lx\n", 81 + 1 + 2 * i, store[i], 82 + 1 + 2 * i, load[i]); 83 + return TEST_FAIL; 84 + } 85 + } 86 + 87 + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 88 + for (i = 64; i < VEC_MAX; i++) { 89 + if (store[i] != load[i]) { 90 + printf("store[%d]: %lx load[%d] %lx\n", 91 + i, store[i], i, load[i]); 92 + return TEST_FAIL; 93 + } 94 + } 95 + #else /* In LE each value pair is stored in an alternate manner */ 96 + for (i = 64; i < VEC_MAX; i++) { 97 + if (!(i % 2) && (store[i] != load[i+1])) { 98 + printf("store[%d]: %lx load[%d] %lx\n", 99 + i, store[i], i+1, load[i+1]); 100 + return TEST_FAIL; 101 + } 102 + if ((i % 2) && (store[i] != load[i-1])) { 103 + printf("here store[%d]: %lx load[%d] %lx\n", 104 + i, store[i], i-1, load[i-1]); 105 + return TEST_FAIL; 106 + } 107 + } 108 + #endif 109 + return TEST_PASS; 110 + } 111 + 112 + void load_vsx_vmx(unsigned long *load, unsigned long *vsx, 113 + unsigned long vmx[][2]) 114 + { 115 + int i; 116 + 117 + for (i = 0; i < VSX_MAX; i++) 118 + vsx[i] = load[1 + 2 * i]; 119 + 120 + for (i = 0; i < VMX_MAX; i++) { 121 + vmx[i][0] = load[64 + 2 * i]; 122 + vmx[i][1] = load[65 + 2 * i]; 123 + } 124 + } 125 + 126 + void loadvsx(void *p, int tmp); 127 + void storevsx(void *p, int tmp);
+119
tools/testing/selftests/powerpc/ptrace/ptrace.h
··· 486 486 return TEST_PASS; 487 487 } 488 488 489 + /* VMX */ 490 + int show_vmx(pid_t child, unsigned long vmx[][2]) 491 + { 492 + int ret; 493 + 494 + ret = ptrace(PTRACE_GETVRREGS, child, 0, vmx); 495 + if (ret) { 496 + perror("ptrace(PTRACE_GETVRREGS) failed"); 497 + return TEST_FAIL; 498 + } 499 + return TEST_PASS; 500 + } 501 + 502 + int show_vmx_ckpt(pid_t child, unsigned long vmx[][2]) 503 + { 504 + unsigned long regs[34][2]; 505 + struct iovec iov; 506 + int ret; 507 + 508 + iov.iov_base = (u64 *) regs; 509 + iov.iov_len = sizeof(regs); 510 + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVMX, &iov); 511 + if (ret) { 512 + perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVMX) failed"); 513 + return TEST_FAIL; 514 + } 515 + memcpy(vmx, regs, sizeof(regs)); 516 + return TEST_PASS; 517 + } 518 + 519 + 520 + int write_vmx(pid_t child, unsigned long vmx[][2]) 521 + { 522 + int ret; 523 + 524 + ret = ptrace(PTRACE_SETVRREGS, child, 0, vmx); 525 + if (ret) { 526 + perror("ptrace(PTRACE_SETVRREGS) failed"); 527 + return TEST_FAIL; 528 + } 529 + return TEST_PASS; 530 + } 531 + 532 + int write_vmx_ckpt(pid_t child, unsigned long vmx[][2]) 533 + { 534 + unsigned long regs[34][2]; 535 + struct iovec iov; 536 + int ret; 537 + 538 + memcpy(regs, vmx, sizeof(regs)); 539 + iov.iov_base = (u64 *) regs; 540 + iov.iov_len = sizeof(regs); 541 + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVMX, &iov); 542 + if (ret) { 543 + perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVMX) failed"); 544 + return TEST_FAIL; 545 + } 546 + return TEST_PASS; 547 + } 548 + 549 + /* VSX */ 550 + int show_vsx(pid_t child, unsigned long *vsx) 551 + { 552 + int ret; 553 + 554 + ret = ptrace(PTRACE_GETVSRREGS, child, 0, vsx); 555 + if (ret) { 556 + perror("ptrace(PTRACE_GETVSRREGS) failed"); 557 + return TEST_FAIL; 558 + } 559 + return TEST_PASS; 560 + } 561 + 562 + int show_vsx_ckpt(pid_t child, unsigned long *vsx) 563 + { 564 + unsigned long regs[32]; 565 + struct iovec iov; 566 + int ret; 567 + 568 
+ iov.iov_base = (u64 *) regs; 569 + iov.iov_len = sizeof(regs); 570 + ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVSX, &iov); 571 + if (ret) { 572 + perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVSX) failed"); 573 + return TEST_FAIL; 574 + } 575 + memcpy(vsx, regs, sizeof(regs)); 576 + return TEST_PASS; 577 + } 578 + 579 + int write_vsx(pid_t child, unsigned long *vsx) 580 + { 581 + int ret; 582 + 583 + ret = ptrace(PTRACE_SETVSRREGS, child, 0, vsx); 584 + if (ret) { 585 + perror("ptrace(PTRACE_SETVSRREGS) failed"); 586 + return TEST_FAIL; 587 + } 588 + return TEST_PASS; 589 + } 590 + 591 + int write_vsx_ckpt(pid_t child, unsigned long *vsx) 592 + { 593 + unsigned long regs[32]; 594 + struct iovec iov; 595 + int ret; 596 + 597 + memcpy(regs, vsx, sizeof(regs)); 598 + iov.iov_base = (u64 *) regs; 599 + iov.iov_len = sizeof(regs); 600 + ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVSX, &iov); 601 + if (ret) { 602 + perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVSX) failed"); 603 + return TEST_FAIL; 604 + } 605 + return TEST_PASS; 606 + } 607 + 489 608 /* Analyse TEXASR after TM failure */ 490 609 inline unsigned long get_tfiar(void) 491 610 {