/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

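/*
 * VF <-> PF mailbox control accessors.  As used below, writing 2 to the RCV
 * control byte raises the VF's receive acknowledge (RCV_MSG_ACK), and writing
 * 1/0 to the TRN control byte sets/clears TRN_MSG_VALID for an outgoing
 * message.
 */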
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
        WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
        WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful once RCV_MSG_VALID
 * has been set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
        return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}


static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
                                   enum idh_event event)
{
        int r = 0;
        u32 reg;

        reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
        if (reg == IDH_FAIL)
                r = -EINVAL;
        if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
                r = -ENODEV;
        else if (reg != event)
                return -ENOENT;

        xgpu_nv_mailbox_send_ack(adev);

        return r;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
        return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

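/* Poll in 5 ms steps until the host asserts TRN_MSG_ACK or the timeout expires. */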
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
        u8 reg;

        do {
                reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
                if (reg & 2)
                        return 0;

                mdelay(5);
                timeout -= 5;
        } while (timeout > 1);

        dev_err(adev->dev, "Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

        return -ETIME;
}

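/*
 * Poll the receive mailbox for the expected event, acking whatever arrives.
 * An IDH_UNRECOV_ERR_NOTIFICATION from the host marks the VF as being in an
 * unrecoverable (RMA) state and aborts the wait early.
 */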
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
        int r;
        uint64_t timeout, now;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        now = (uint64_t)ktime_to_ms(ktime_get());
        timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

        do {
                r = xgpu_nv_mailbox_rcv_msg(adev, event);
                if (!r) {
                        dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
                                event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
                        return 0;
                } else if (r == -ENODEV) {
                        if (!amdgpu_ras_is_rma(adev)) {
                                ras->is_rma = true;
                                dev_err(adev->dev, "VF is in an unrecoverable state. "
                                        "Runtime Services are halted.\n");
                        }
                        return r;
                }

                msleep(10);
                now = (uint64_t)ktime_to_ms(ktime_get());
        } while (timeout > now);

        dev_dbg(adev->dev, "nv_poll_msg timed out\n");

        return -ETIME;
}

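/*
 * Transmit one request to the host: wait for any stale ACK to deassert, write
 * the request and up to three data words into TRN_DW0..DW3, raise
 * TRN_MSG_VALID, then poll for the host's ACK before dropping VALID again.
 */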
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
                                      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
        int r;
        uint8_t trn;

        /* IMPORTANT:
         * Clear TRN_MSG_VALID first: this clears the host's RCV_MSG_VALID, and
         * with RCV_MSG_VALID cleared the hardware automatically clears the
         * host's RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK.
         * Otherwise the xgpu_nv_poll_ack() below would return immediately.
         */
        do {
                xgpu_nv_mailbox_set_valid(adev, false);
                trn = xgpu_nv_peek_ack(adev);
                if (trn) {
                        dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again!\n", trn);
                        msleep(1);
                }
        } while (trn);

        dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
        xgpu_nv_mailbox_set_valid(adev, true);

        /* start to poll ack */
        r = xgpu_nv_poll_ack(adev);
        if (r)
                dev_err(adev->dev, "Didn't get ack from pf, continuing\n");

        xgpu_nv_mailbox_set_valid(adev, false);
}

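/*
 * Send a request to the host and, for request types that have a defined
 * reply, poll for the matching "ready" event.  A failed poll is resent (up to
 * five attempts in total); an RMA state short-circuits the request.
 */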
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
                                                   enum idh_request req, u32 data1, u32 data2, u32 data3)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r = 0, retry = 1;
        enum idh_event event = -1;

        mutex_lock(&virt->access_req_mutex);
send_request:

        if (amdgpu_ras_is_rma(adev)) {
                r = -ENODEV;
                goto out;
        }

        xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

        switch (req) {
        case IDH_REQ_GPU_INIT_ACCESS:
        case IDH_REQ_GPU_FINI_ACCESS:
        case IDH_REQ_GPU_RESET_ACCESS:
                event = IDH_READY_TO_ACCESS_GPU;
                break;
        case IDH_REQ_GPU_INIT_DATA:
                event = IDH_REQ_GPU_INIT_DATA_READY;
                break;
        case IDH_RAS_POISON:
                if (data1 != 0)
                        event = IDH_RAS_POISON_READY;
                break;
        case IDH_REQ_RAS_ERROR_COUNT:
                event = IDH_RAS_ERROR_COUNT_READY;
                break;
        case IDH_REQ_RAS_CPER_DUMP:
                event = IDH_RAS_CPER_DUMP_READY;
                break;
        case IDH_REQ_RAS_CHK_CRITI:
                event = IDH_REQ_RAS_CHK_CRITI_READY;
                break;
        default:
                break;
        }

        if (event != -1) {
                r = xgpu_nv_poll_msg(adev, event);
                if (r) {
                        if (retry++ < 5)
                                goto send_request;

                        if (req != IDH_REQ_GPU_INIT_DATA) {
                                dev_err(adev->dev, "Didn't get msg:%d from pf, error=%d\n", event, r);
                                goto out;
                        } else /* host doesn't support REQ_GPU_INIT_DATA handshake */
                                adev->virt.req_init_data_ver = 0;
                } else {
                        if (req == IDH_REQ_GPU_INIT_DATA) {
                                switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
                                case GPU_CRIT_REGION_V2:
                                        adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
                                        adev->virt.init_data_header.offset =
                                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                                        adev->virt.init_data_header.size_kb =
                                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
                                        break;
                                default:
                                        adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
                                        adev->virt.init_data_header.offset = -1;
                                        adev->virt.init_data_header.size_kb = 0;
                                        break;
                                }
                        }
                }

                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                }
        }

out:
        mutex_unlock(&virt->access_req_mutex);

        return r;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
{
        return xgpu_nv_send_access_requests_with_param(adev,
                                                       req, 0, 0, 0);
}

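/* Ask the host for a GPU reset, retrying up to NV_MAILBOX_POLL_MSG_REP_MAX times. */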
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
        int ret, i = 0;

        while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
                ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
                if (!ret)
                        break;
                i++;
        }

        return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
        return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;
        int r = 0;

        req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
        r = xgpu_nv_send_access_requests(adev, req);

        return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
        return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
                                                       0, GPU_CRIT_REGION_V2, 0);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        dev_dbg(adev->dev, "get ack intr and do nothing.\n");
        return 0;
}

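/*
 * In the mailbox interrupt control register, bit 1 gates the ACK interrupt
 * toggled here and bit 0 gates the receive ("message valid") interrupt
 * toggled in xgpu_nv_set_mailbox_rcv_irq() below.
 */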
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 2;
        else
                tmp &= ~2;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
        xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

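/* Poll every 10 ms for the host's FLR-completion notification. */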
static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
                        dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
                        return 0;
                }
                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

        dev_dbg(adev->dev, "Timed out waiting for NV IDH_FLR_NOTIFICATION_CMPL\n");
        return -ETIME;
}

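/*
 * Worker for a host-initiated function level reset (FLR): stop data exchange
 * with the host and, unless a per-engine TDR is expected to handle the hang,
 * trigger full GPU recovery flagged as a host FLR.
 */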
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
        struct amdgpu_reset_context reset_context = { 0 };

        amdgpu_virt_fini_data_exchange(adev);

        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
                && (!amdgpu_device_has_job_running(adev) ||
                adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
                adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {

                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
                set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
}

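/*
 * Worker that requests the host's bad page list; it only runs when no reset
 * is in flight (i.e. the reset domain semaphore can be taken for reading).
 */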
static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

        if (down_read_trylock(&adev->reset_domain->sem)) {
                amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_request_bad_pages(adev);
                up_read(&adev->reset_domain->sem);
        }
}

/**
 * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
 * @work: pointer to the work_struct
 *
 * This work handler is triggered when bad pages are ready, and it reinitializes
 * the data exchange region to retrieve updated bad page information from the host.
 */
static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

        if (down_read_trylock(&adev->reset_domain->sem)) {
                amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_init_data_exchange(adev);
                up_read(&adev->reset_domain->sem);
        }
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 1;
        else
                tmp &= ~1;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

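/*
 * Receive-interrupt handler: peek the incoming message and dispatch host
 * notifications (bad pages ready/available, unrecoverable error, FLR) to the
 * corresponding workers; synchronous replies are left to the polling paths.
 */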
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        switch (event) {
        case IDH_RAS_BAD_PAGES_READY:
                xgpu_nv_mailbox_send_ack(adev);
                if (amdgpu_sriov_runtime(adev))
                        schedule_work(&adev->virt.handle_bad_pages_work);
                break;
        case IDH_RAS_BAD_PAGES_NOTIFICATION:
                xgpu_nv_mailbox_send_ack(adev);
                if (amdgpu_sriov_runtime(adev))
                        schedule_work(&adev->virt.req_bad_pages_work);
                break;
        case IDH_UNRECOV_ERR_NOTIFICATION:
                xgpu_nv_mailbox_send_ack(adev);
                if (!amdgpu_ras_is_rma(adev)) {
                        ras->is_rma = true;
                        dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
                }

                if (amdgpu_sriov_runtime(adev))
                        WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
                                                                &adev->virt.flr_work),
                                  "Failed to queue work! at %s",
                                  __func__);
                break;
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev))
                        WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
                                                                &adev->virt.flr_work),
                                  "Failed to queue work! at %s",
                                  __func__);
                break;
        /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ handler
         * can ignore it here since the polling thread will handle it; other
         * messages such as FLR complete are likewise not handled here.
         */
        case IDH_CLR_MSG_BUF:
        case IDH_FLR_NOTIFICATION_CMPL:
        case IDH_READY_TO_ACCESS_GPU:
        default:
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
        .set = xgpu_nv_set_mailbox_ack_irq,
        .process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
        .set = xgpu_nv_set_mailbox_rcv_irq,
        .process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->virt.ack_irq.num_types = 1;
        adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
        adev->virt.rcv_irq.num_types = 1;
        adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

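/* Register the BIF mailbox interrupt sources: src_id 135 for receive, 138 for ack. */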
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;
        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
        INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
        INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);

        return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

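/*
 * Forward a RAS poison consumption event to the host.  UMC IP versions before
 * 12.0.0 send the bare IDH_RAS_POISON request; newer versions also stop the
 * data exchange and pass the affected RAS block as a parameter.
 */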
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
                                       enum amdgpu_ras_block block)
{
        if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
                xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
        } else {
                amdgpu_virt_fini_data_exchange(adev);
                xgpu_nv_send_access_requests_with_param(adev,
                                                        IDH_RAS_POISON, block, 0, 0);
        }
}

static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
        enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

        return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
        return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

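/* The 64-bit CPER read pointer is split into high/low dwords for the mailbox. */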
static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
{
        uint32_t vf_rptr_hi, vf_rptr_lo;

        vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
        vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
        return xgpu_nv_send_access_requests_with_param(
                adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}

static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
{
        return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}

static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
{
        uint32_t addr_hi, addr_lo;

        addr_hi = (uint32_t)(addr >> 32);
        addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
        return xgpu_nv_send_access_requests_with_param(
                adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu = xgpu_nv_release_full_gpu_access,
        .req_init_data = xgpu_nv_request_init_data,
        .reset_gpu = xgpu_nv_request_reset,
        .ready_to_reset = xgpu_nv_ready_to_reset,
        .wait_reset = xgpu_nv_wait_reset,
        .trans_msg = xgpu_nv_mailbox_trans_msg,
        .ras_poison_handler = xgpu_nv_ras_poison_handler,
        .rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
        .req_ras_err_count = xgpu_nv_req_ras_err_count,
        .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
        .req_bad_pages = xgpu_nv_req_ras_bad_pages,
        .req_ras_chk_criti = xgpu_nv_check_vf_critical_region
};