Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linux-5.6' of git://github.com/skeggsb/linux into drm-next

- Rewrite of the ACR (formerly "secure boot") code, both to support
Turing and multiple FW revisions, and to make life easier when
having to debug HW/FW bring-up in the future
- Support for TU10x graphics engine, TU11x not available yet as FW isn't ready
- Proper page 'kind' mappings for Turing
- 10-bit LUT support
- GP10B (Tegra) fixes
- Misc other fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Ben Skeggs <skeggsb@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/<CACAvsv5GKasg9-hEUwp9+aHVJg+nbQ0LukXyudgj6=YKu96jWQ@mail.gmail.com>

+8201 -7852
+1 -2
drivers/gpu/drm/nouveau/dispnv04/arb.c
··· 54 54 nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) 55 55 { 56 56 int pagemiss, cas, width, bpp; 57 - int nvclks, mclks, pclks, crtpagemiss; 57 + int nvclks, mclks, crtpagemiss; 58 58 int found, mclk_extra, mclk_loop, cbs, m1, p1; 59 59 int mclk_freq, pclk_freq, nvclk_freq; 60 60 int us_m, us_n, us_p, crtc_drain_rate; ··· 69 69 bpp = arb->bpp; 70 70 cbs = 128; 71 71 72 - pclks = 2; 73 72 nvclks = 10; 74 73 mclks = 13 + cas; 75 74 mclk_extra = 3;
+5 -8
drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
··· 644 644 int i; 645 645 646 646 if (nouveau_tv_norm) { 647 - for (i = 0; i < num_tv_norms; i++) { 648 - if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) { 649 - tv_enc->tv_norm = i; 650 - break; 651 - } 652 - } 653 - 654 - if (i == num_tv_norms) 647 + i = match_string(nv17_tv_norm_names, num_tv_norms, 648 + nouveau_tv_norm); 649 + if (i < 0) 655 650 NV_WARN(drm, "Invalid TV norm setting \"%s\"\n", 656 651 nouveau_tv_norm); 652 + else 653 + tv_enc->tv_norm = i; 657 654 } 658 655 659 656 drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
+8 -3
drivers/gpu/drm/nouveau/dispnv50/base907c.c
··· 75 75 } 76 76 } 77 77 78 - static void 79 - base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) 78 + static bool 79 + base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) 80 80 { 81 - asyw->xlut.i.mode = 7; 81 + if (size != 256 && size != 1024) 82 + return false; 83 + 84 + asyw->xlut.i.mode = size == 1024 ? 4 : 7; 82 85 asyw->xlut.i.enable = 2; 83 86 asyw->xlut.i.load = head907d_olut_load; 87 + return true; 84 88 } 85 89 86 90 static inline u32 ··· 164 160 .csc_set = base907c_csc_set, 165 161 .csc_clr = base907c_csc_clr, 166 162 .olut_core = true, 163 + .ilut_size = 1024, 167 164 .xlut_set = base907c_xlut_set, 168 165 .xlut_clr = base907c_xlut_clr, 169 166 .image_set = base907c_image_set,
+67 -44
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 660 660 struct nouveau_encoder *outp; 661 661 662 662 struct drm_dp_mst_topology_mgr mgr; 663 - struct nv50_msto *msto[4]; 664 663 665 664 bool modified; 666 665 bool disabled; ··· 725 726 drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); 726 727 727 728 msto->mstc = NULL; 728 - msto->head = NULL; 729 729 msto->disabled = false; 730 730 } 731 731 ··· 870 872 mstm->outp->update(mstm->outp, head->base.index, armh, proto, 871 873 nv50_dp_bpc_to_depth(armh->or.bpc)); 872 874 873 - msto->head = head; 874 875 msto->mstc = mstc; 875 876 mstm->modified = true; 876 877 } ··· 910 913 .destroy = nv50_msto_destroy, 911 914 }; 912 915 913 - static int 914 - nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id, 915 - struct nv50_msto **pmsto) 916 + static struct nv50_msto * 917 + nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id) 916 918 { 917 919 struct nv50_msto *msto; 918 920 int ret; 919 921 920 - if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL))) 921 - return -ENOMEM; 922 + msto = kzalloc(sizeof(*msto), GFP_KERNEL); 923 + if (!msto) 924 + return ERR_PTR(-ENOMEM); 922 925 923 926 ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto, 924 - DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id); 927 + DRM_MODE_ENCODER_DPMST, "mst-%d", id); 925 928 if (ret) { 926 - kfree(*pmsto); 927 - *pmsto = NULL; 928 - return ret; 929 + kfree(msto); 930 + return ERR_PTR(ret); 929 931 } 930 932 931 933 drm_encoder_helper_add(&msto->encoder, &nv50_msto_help); 932 - msto->encoder.possible_crtcs = heads; 933 - return 0; 934 + msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base); 935 + msto->head = head; 936 + return msto; 934 937 } 935 938 936 939 static struct drm_encoder * 937 940 nv50_mstc_atomic_best_encoder(struct drm_connector *connector, 938 941 struct drm_connector_state *connector_state) 939 942 { 940 - struct nv50_head *head = nv50_head(connector_state->crtc); 941 943 struct nv50_mstc *mstc = nv50_mstc(connector); 944 + struct 
drm_crtc *crtc = connector_state->crtc; 942 945 943 - return &mstc->mstm->msto[head->base.index]->encoder; 944 - } 946 + if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc))) 947 + return NULL; 945 948 946 - static struct drm_encoder * 947 - nv50_mstc_best_encoder(struct drm_connector *connector) 948 - { 949 - struct nv50_mstc *mstc = nv50_mstc(connector); 950 - 951 - return &mstc->mstm->msto[0]->encoder; 949 + return &nv50_head(crtc)->msto->encoder; 952 950 } 953 951 954 952 static enum drm_mode_status ··· 1030 1038 nv50_mstc_help = { 1031 1039 .get_modes = nv50_mstc_get_modes, 1032 1040 .mode_valid = nv50_mstc_mode_valid, 1033 - .best_encoder = nv50_mstc_best_encoder, 1034 1041 .atomic_best_encoder = nv50_mstc_atomic_best_encoder, 1035 1042 .atomic_check = nv50_mstc_atomic_check, 1036 1043 .detect_ctx = nv50_mstc_detect, ··· 1062 1071 const char *path, struct nv50_mstc **pmstc) 1063 1072 { 1064 1073 struct drm_device *dev = mstm->outp->base.base.dev; 1074 + struct drm_crtc *crtc; 1065 1075 struct nv50_mstc *mstc; 1066 - int ret, i; 1076 + int ret; 1067 1077 1068 1078 if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL))) 1069 1079 return -ENOMEM; ··· 1084 1092 mstc->connector.funcs->reset(&mstc->connector); 1085 1093 nouveau_conn_attach_properties(&mstc->connector); 1086 1094 1087 - for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++) 1088 - drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder); 1095 + drm_for_each_crtc(crtc, dev) { 1096 + if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc))) 1097 + continue; 1098 + 1099 + drm_connector_attach_encoder(&mstc->connector, 1100 + &nv50_head(crtc)->msto->encoder); 1101 + } 1089 1102 1090 1103 drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0); 1091 1104 drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0); ··· 1364 1367 const int max_payloads = hweight8(outp->dcb->heads); 1365 1368 struct drm_device *dev = 
outp->base.base.dev; 1366 1369 struct nv50_mstm *mstm; 1367 - int ret, i; 1370 + int ret; 1368 1371 u8 dpcd; 1369 1372 1370 1373 /* This is a workaround for some monitors not functioning ··· 1386 1389 max_payloads, conn_base_id); 1387 1390 if (ret) 1388 1391 return ret; 1389 - 1390 - for (i = 0; i < max_payloads; i++) { 1391 - ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name, 1392 - i, &mstm->msto[i]); 1393 - if (ret) 1394 - return ret; 1395 - } 1396 1392 1397 1393 return 0; 1398 1394 } ··· 1559 1569 .destroy = nv50_sor_destroy, 1560 1570 }; 1561 1571 1572 + static bool nv50_has_mst(struct nouveau_drm *drm) 1573 + { 1574 + struct nvkm_bios *bios = nvxx_bios(&drm->client.device); 1575 + u32 data; 1576 + u8 ver, hdr, cnt, len; 1577 + 1578 + data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len); 1579 + return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04); 1580 + } 1581 + 1562 1582 static int 1563 1583 nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) 1564 1584 { 1565 1585 struct nouveau_connector *nv_connector = nouveau_connector(connector); 1566 1586 struct nouveau_drm *drm = nouveau_drm(connector->dev); 1567 - struct nvkm_bios *bios = nvxx_bios(&drm->client.device); 1568 1587 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); 1569 1588 struct nouveau_encoder *nv_encoder; 1570 1589 struct drm_encoder *encoder; 1571 - u8 ver, hdr, cnt, len; 1572 - u32 data; 1573 1590 int type, ret; 1574 1591 1575 1592 switch (dcbe->type) { ··· 1621 1624 } 1622 1625 1623 1626 if (nv_connector->type != DCB_CONNECTOR_eDP && 1624 - (data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len)) && 1625 - ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04)) { 1626 - ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, 1627 - nv_connector->base.base.id, 1627 + nv50_has_mst(drm)) { 1628 + ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 1629 + 16, nv_connector->base.base.id, 1628 1630 &nv_encoder->dp.mstm); 1629 1631 if (ret) 1630 
1632 return ret; ··· 2319 2323 struct nv50_disp *disp; 2320 2324 struct dcb_output *dcbe; 2321 2325 int crtcs, ret, i; 2326 + bool has_mst = nv50_has_mst(drm); 2322 2327 2323 2328 disp = kzalloc(sizeof(*disp), GFP_KERNEL); 2324 2329 if (!disp) ··· 2368 2371 crtcs = 0x3; 2369 2372 2370 2373 for (i = 0; i < fls(crtcs); i++) { 2374 + struct nv50_head *head; 2375 + 2371 2376 if (!(crtcs & (1 << i))) 2372 2377 continue; 2373 - ret = nv50_head_create(dev, i); 2374 - if (ret) 2378 + 2379 + head = nv50_head_create(dev, i); 2380 + if (IS_ERR(head)) { 2381 + ret = PTR_ERR(head); 2375 2382 goto out; 2383 + } 2384 + 2385 + if (has_mst) { 2386 + head->msto = nv50_msto_new(dev, head, i); 2387 + if (IS_ERR(head->msto)) { 2388 + ret = PTR_ERR(head->msto); 2389 + head->msto = NULL; 2390 + goto out; 2391 + } 2392 + 2393 + /* 2394 + * FIXME: This is a hack to workaround the following 2395 + * issues: 2396 + * 2397 + * https://gitlab.gnome.org/GNOME/mutter/issues/759 2398 + * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277 2399 + * 2400 + * Once these issues are closed, this should be 2401 + * removed 2402 + */ 2403 + head->msto->encoder.possible_crtcs = crtcs; 2404 + } 2376 2405 } 2377 2406 2378 2407 /* create encoder/connector objects based on VBIOS DCB table */
+2
drivers/gpu/drm/nouveau/dispnv50/disp.h
··· 4 4 5 5 #include "nouveau_display.h" 6 6 7 + struct nv50_msto; 8 + 7 9 struct nv50_disp { 8 10 struct nvif_disp *disp; 9 11 struct nv50_core *core;
+26 -17
drivers/gpu/drm/nouveau/dispnv50/head.c
··· 213 213 { 214 214 struct nv50_disp *disp = nv50_disp(head->base.base.dev); 215 215 struct drm_property_blob *olut = asyh->state.gamma_lut; 216 + int size; 216 217 217 218 /* Determine whether core output LUT should be enabled. */ 218 219 if (olut) { ··· 230 229 } 231 230 } 232 231 233 - if (!olut && !head->func->olut_identity) { 234 - asyh->olut.handle = 0; 235 - return 0; 232 + if (!olut) { 233 + if (!head->func->olut_identity) { 234 + asyh->olut.handle = 0; 235 + return 0; 236 + } 237 + size = 0; 238 + } else { 239 + size = drm_color_lut_size(olut); 236 240 } 237 241 242 + if (!head->func->olut(head, asyh, size)) { 243 + DRM_DEBUG_KMS("Invalid olut\n"); 244 + return -EINVAL; 245 + } 238 246 asyh->olut.handle = disp->core->chan.vram.handle; 239 247 asyh->olut.buffer = !asyh->olut.buffer; 240 - head->func->olut(head, asyh); 248 + 241 249 return 0; 242 250 } 243 251 ··· 483 473 .atomic_destroy_state = nv50_head_atomic_destroy_state, 484 474 }; 485 475 486 - int 476 + struct nv50_head * 487 477 nv50_head_create(struct drm_device *dev, int index) 488 478 { 489 479 struct nouveau_drm *drm = nouveau_drm(dev); ··· 495 485 496 486 head = kzalloc(sizeof(*head), GFP_KERNEL); 497 487 if (!head) 498 - return -ENOMEM; 488 + return ERR_PTR(-ENOMEM); 499 489 500 490 head->func = disp->core->func->head; 501 491 head->base.index = index; ··· 513 503 ret = nv50_curs_new(drm, head->base.index, &curs); 514 504 if (ret) { 515 505 kfree(head); 516 - return ret; 506 + return ERR_PTR(ret); 517 507 } 518 508 519 509 crtc = &head->base.base; 520 510 drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane, 521 511 &nv50_head_func, "head-%d", head->base.index); 522 512 drm_crtc_helper_add(crtc, &nv50_head_help); 513 + /* Keep the legacy gamma size at 256 to avoid compatibility issues */ 523 514 drm_mode_crtc_set_gamma_size(crtc, 256); 524 - if (disp->disp->object.oclass >= GF110_DISP) 525 - drm_crtc_enable_color_mgmt(crtc, 256, true, 256); 526 - else 527 - 
drm_crtc_enable_color_mgmt(crtc, 0, false, 256); 515 + drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size, 516 + disp->disp->object.oclass >= GF110_DISP, 517 + head->func->olut_size); 528 518 529 519 if (head->func->olut_set) { 530 520 ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut); 531 - if (ret) 532 - goto out; 521 + if (ret) { 522 + nv50_head_destroy(crtc); 523 + return ERR_PTR(ret); 524 + } 533 525 } 534 526 535 - out: 536 - if (ret) 537 - nv50_head_destroy(crtc); 538 - return ret; 527 + return head; 539 528 }
+6 -4
drivers/gpu/drm/nouveau/dispnv50/head.h
··· 11 11 const struct nv50_head_func *func; 12 12 struct nouveau_crtc base; 13 13 struct nv50_lut olut; 14 + struct nv50_msto *msto; 14 15 }; 15 16 16 - int nv50_head_create(struct drm_device *, int index); 17 + struct nv50_head *nv50_head_create(struct drm_device *, int index); 17 18 void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *); 18 19 void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y); 19 20 20 21 struct nv50_head_func { 21 22 void (*view)(struct nv50_head *, struct nv50_head_atom *); 22 23 void (*mode)(struct nv50_head *, struct nv50_head_atom *); 23 - void (*olut)(struct nv50_head *, struct nv50_head_atom *); 24 + bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int); 24 25 bool olut_identity; 26 + int olut_size; 25 27 void (*olut_set)(struct nv50_head *, struct nv50_head_atom *); 26 28 void (*olut_clr)(struct nv50_head *); 27 29 void (*core_calc)(struct nv50_head *, struct nv50_head_atom *); ··· 45 43 extern const struct nv50_head_func head507d; 46 44 void head507d_view(struct nv50_head *, struct nv50_head_atom *); 47 45 void head507d_mode(struct nv50_head *, struct nv50_head_atom *); 48 - void head507d_olut(struct nv50_head *, struct nv50_head_atom *); 46 + bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int); 49 47 void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *); 50 48 void head507d_core_clr(struct nv50_head *); 51 49 int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *, ··· 62 60 extern const struct nv50_head_func head907d; 63 61 void head907d_view(struct nv50_head *, struct nv50_head_atom *); 64 62 void head907d_mode(struct nv50_head *, struct nv50_head_atom *); 65 - void head907d_olut(struct nv50_head *, struct nv50_head_atom *); 63 + bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int); 66 64 void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *); 67 65 void head907d_olut_clr(struct nv50_head *); 68 66 void 
head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
+7 -2
drivers/gpu/drm/nouveau/dispnv50/head507d.c
··· 271 271 writew(readw(mem - 4), mem + 4); 272 272 } 273 273 274 - void 275 - head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) 274 + bool 275 + head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) 276 276 { 277 + if (size != 256) 278 + return false; 279 + 277 280 if (asyh->base.cpp == 1) 278 281 asyh->olut.mode = 0; 279 282 else 280 283 asyh->olut.mode = 1; 281 284 282 285 asyh->olut.load = head507d_olut_load; 286 + return true; 283 287 } 284 288 285 289 void ··· 332 328 .view = head507d_view, 333 329 .mode = head507d_mode, 334 330 .olut = head507d_olut, 331 + .olut_size = 256, 335 332 .olut_set = head507d_olut_set, 336 333 .olut_clr = head507d_olut_clr, 337 334 .core_calc = head507d_core_calc,
+1
drivers/gpu/drm/nouveau/dispnv50/head827d.c
··· 108 108 .view = head507d_view, 109 109 .mode = head507d_mode, 110 110 .olut = head507d_olut, 111 + .olut_size = 256, 111 112 .olut_set = head827d_olut_set, 112 113 .olut_clr = head827d_olut_clr, 113 114 .core_calc = head507d_core_calc,
+8 -3
drivers/gpu/drm/nouveau/dispnv50/head907d.c
··· 230 230 writew(readw(mem - 4), mem + 4); 231 231 } 232 232 233 - void 234 - head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) 233 + bool 234 + head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) 235 235 { 236 - asyh->olut.mode = 7; 236 + if (size != 256 && size != 1024) 237 + return false; 238 + 239 + asyh->olut.mode = size == 1024 ? 4 : 7; 237 240 asyh->olut.load = head907d_olut_load; 241 + return true; 238 242 } 239 243 240 244 void ··· 289 285 .view = head907d_view, 290 286 .mode = head907d_mode, 291 287 .olut = head907d_olut, 288 + .olut_size = 1024, 292 289 .olut_set = head907d_olut_set, 293 290 .olut_clr = head907d_olut_clr, 294 291 .core_calc = head507d_core_calc,
+1
drivers/gpu/drm/nouveau/dispnv50/head917d.c
··· 83 83 .view = head907d_view, 84 84 .mode = head907d_mode, 85 85 .olut = head907d_olut, 86 + .olut_size = 1024, 86 87 .olut_set = head907d_olut_set, 87 88 .olut_clr = head907d_olut_clr, 88 89 .core_calc = head507d_core_calc,
+8 -3
drivers/gpu/drm/nouveau/dispnv50/headc37d.c
··· 148 148 } 149 149 } 150 150 151 - static void 152 - headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) 151 + static bool 152 + headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) 153 153 { 154 + if (size != 256 && size != 1024) 155 + return false; 156 + 154 157 asyh->olut.mode = 2; 155 - asyh->olut.size = 0; 158 + asyh->olut.size = size == 1024 ? 2 : 0; 156 159 asyh->olut.range = 0; 157 160 asyh->olut.output_mode = 1; 158 161 asyh->olut.load = head907d_olut_load; 162 + return true; 159 163 } 160 164 161 165 static void ··· 205 201 .view = headc37d_view, 206 202 .mode = headc37d_mode, 207 203 .olut = headc37d_olut, 204 + .olut_size = 1024, 208 205 .olut_set = headc37d_olut_set, 209 206 .olut_clr = headc37d_olut_clr, 210 207 .curs_layout = head917d_curs_layout,
+8 -4
drivers/gpu/drm/nouveau/dispnv50/headc57d.c
··· 151 151 writew(readw(mem - 4), mem + 4); 152 152 } 153 153 154 - void 155 - headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) 154 + bool 155 + headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) 156 156 { 157 + if (size != 0 && size != 256 && size != 1024) 158 + return false; 159 + 157 160 asyh->olut.mode = 2; /* DIRECT10 */ 158 161 asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */; 159 162 asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */ 160 - if (asyh->state.gamma_lut && 161 - asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256) 163 + if (size == 256) 162 164 asyh->olut.load = headc57d_olut_load_8; 163 165 else 164 166 asyh->olut.load = headc57d_olut_load; 167 + return true; 165 168 } 166 169 167 170 static void ··· 197 194 .mode = headc57d_mode, 198 195 .olut = headc57d_olut, 199 196 .olut_identity = true, 197 + .olut_size = 1024, 200 198 .olut_set = headc57d_olut_set, 201 199 .olut_clr = headc57d_olut_clr, 202 200 .curs_layout = head917d_curs_layout,
+1 -1
drivers/gpu/drm/nouveau/dispnv50/lut.c
··· 49 49 kvfree(in); 50 50 } 51 51 } else { 52 - load(in, blob->length / sizeof(*in), mem); 52 + load(in, drm_color_lut_size(blob), mem); 53 53 } 54 54 55 55 return addr;
+12 -5
drivers/gpu/drm/nouveau/dispnv50/wndw.c
··· 318 318 return wndw->func->acquire(wndw, asyw, asyh); 319 319 } 320 320 321 - static void 321 + static int 322 322 nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, 323 323 struct nv50_wndw_atom *armw, 324 324 struct nv50_wndw_atom *asyw, ··· 340 340 */ 341 341 if (!(ilut = asyh->state.gamma_lut)) { 342 342 asyw->visible = false; 343 - return; 343 + return 0; 344 344 } 345 345 346 346 if (wndw->func->ilut) ··· 359 359 /* Recalculate LUT state. */ 360 360 memset(&asyw->xlut, 0x00, sizeof(asyw->xlut)); 361 361 if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) { 362 - wndw->func->ilut(wndw, asyw); 362 + if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) { 363 + DRM_DEBUG_KMS("Invalid ilut\n"); 364 + return -EINVAL; 365 + } 363 366 asyw->xlut.handle = wndw->wndw.vram.handle; 364 367 asyw->xlut.i.buffer = !asyw->xlut.i.buffer; 365 368 asyw->set.xlut = true; ··· 387 384 388 385 /* Can't do an immediate flip while changing the LUT. */ 389 386 asyh->state.async_flip = false; 387 + return 0; 390 388 } 391 389 392 390 static int ··· 428 424 (!armw->visible || 429 425 asyh->state.color_mgmt_changed || 430 426 asyw->state.fb->format->format != 431 - armw->state.fb->format->format)) 432 - nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh); 427 + armw->state.fb->format->format)) { 428 + ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh); 429 + if (ret) 430 + return ret; 431 + } 433 432 434 433 /* Calculate new window state. */ 435 434 if (asyw->visible) {
+2 -1
drivers/gpu/drm/nouveau/dispnv50/wndw.h
··· 64 64 void (*ntfy_clr)(struct nv50_wndw *); 65 65 int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset, 66 66 struct nvif_device *); 67 - void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *); 67 + bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int); 68 68 void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *, 69 69 const struct drm_color_ctm *); 70 70 void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *); 71 71 void (*csc_clr)(struct nv50_wndw *); 72 72 bool ilut_identity; 73 + int ilut_size; 73 74 bool olut_core; 74 75 void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *); 75 76 void (*xlut_clr)(struct nv50_wndw *);
+8 -3
drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
··· 71 71 } 72 72 } 73 73 74 - static void 75 - wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) 74 + static bool 75 + wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) 76 76 { 77 + if (size != 256 && size != 1024) 78 + return false; 79 + 77 80 asyw->xlut.i.mode = 2; 78 - asyw->xlut.i.size = 0; 81 + asyw->xlut.i.size = size == 1024 ? 2 : 0; 79 82 asyw->xlut.i.range = 0; 80 83 asyw->xlut.i.output_mode = 1; 81 84 asyw->xlut.i.load = head907d_olut_load; 85 + return true; 82 86 } 83 87 84 88 void ··· 265 261 .ntfy_reset = corec37d_ntfy_init, 266 262 .ntfy_wait_begun = base507c_ntfy_wait_begun, 267 263 .ilut = wndwc37e_ilut, 264 + .ilut_size = 1024, 268 265 .xlut_set = wndwc37e_ilut_set, 269 266 .xlut_clr = wndwc37e_ilut_clr, 270 267 .csc = base907c_csc,
+7 -4
drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
··· 156 156 writew(readw(mem - 4), mem + 4); 157 157 } 158 158 159 - static void 160 - wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) 159 + static bool 160 + wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) 161 161 { 162 - u16 size = asyw->ilut->length / sizeof(struct drm_color_lut); 162 + if (size = size ? size : 1024, size != 256 && size != 1024) 163 + return false; 164 + 163 165 if (size == 256) { 164 166 asyw->xlut.i.mode = 1; /* DIRECT8. */ 165 167 } else { 166 168 asyw->xlut.i.mode = 2; /* DIRECT10. */ 167 - size = 1024; 168 169 } 169 170 asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */; 170 171 asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */ 171 172 asyw->xlut.i.load = wndwc57e_ilut_load; 173 + return true; 172 174 } 173 175 174 176 static const struct nv50_wndw_func ··· 185 183 .ntfy_wait_begun = base507c_ntfy_wait_begun, 186 184 .ilut = wndwc57e_ilut, 187 185 .ilut_identity = true, 186 + .ilut_size = 1024, 188 187 .xlut_set = wndwc57e_ilut_set, 189 188 .xlut_clr = wndwc57e_ilut_clr, 190 189 .csc = base907c_csc,
+152
drivers/gpu/drm/nouveau/include/nvfw/acr.h
··· 1 + #ifndef __NVFW_ACR_H__ 2 + #define __NVFW_ACR_H__ 3 + 4 + struct wpr_header { 5 + #define WPR_HEADER_V0_FALCON_ID_INVALID 0xffffffff 6 + u32 falcon_id; 7 + u32 lsb_offset; 8 + u32 bootstrap_owner; 9 + u32 lazy_bootstrap; 10 + #define WPR_HEADER_V0_STATUS_NONE 0 11 + #define WPR_HEADER_V0_STATUS_COPY 1 12 + #define WPR_HEADER_V0_STATUS_VALIDATION_CODE_FAILED 2 13 + #define WPR_HEADER_V0_STATUS_VALIDATION_DATA_FAILED 3 14 + #define WPR_HEADER_V0_STATUS_VALIDATION_DONE 4 15 + #define WPR_HEADER_V0_STATUS_VALIDATION_SKIPPED 5 16 + #define WPR_HEADER_V0_STATUS_BOOTSTRAP_READY 6 17 + u32 status; 18 + }; 19 + 20 + void wpr_header_dump(struct nvkm_subdev *, const struct wpr_header *); 21 + 22 + struct wpr_header_v1 { 23 + #define WPR_HEADER_V1_FALCON_ID_INVALID 0xffffffff 24 + u32 falcon_id; 25 + u32 lsb_offset; 26 + u32 bootstrap_owner; 27 + u32 lazy_bootstrap; 28 + u32 bin_version; 29 + #define WPR_HEADER_V1_STATUS_NONE 0 30 + #define WPR_HEADER_V1_STATUS_COPY 1 31 + #define WPR_HEADER_V1_STATUS_VALIDATION_CODE_FAILED 2 32 + #define WPR_HEADER_V1_STATUS_VALIDATION_DATA_FAILED 3 33 + #define WPR_HEADER_V1_STATUS_VALIDATION_DONE 4 34 + #define WPR_HEADER_V1_STATUS_VALIDATION_SKIPPED 5 35 + #define WPR_HEADER_V1_STATUS_BOOTSTRAP_READY 6 36 + #define WPR_HEADER_V1_STATUS_REVOCATION_CHECK_FAILED 7 37 + u32 status; 38 + }; 39 + 40 + void wpr_header_v1_dump(struct nvkm_subdev *, const struct wpr_header_v1 *); 41 + 42 + struct lsf_signature { 43 + u8 prd_keys[2][16]; 44 + u8 dbg_keys[2][16]; 45 + u32 b_prd_present; 46 + u32 b_dbg_present; 47 + u32 falcon_id; 48 + }; 49 + 50 + struct lsf_signature_v1 { 51 + u8 prd_keys[2][16]; 52 + u8 dbg_keys[2][16]; 53 + u32 b_prd_present; 54 + u32 b_dbg_present; 55 + u32 falcon_id; 56 + u32 supports_versioning; 57 + u32 version; 58 + u32 depmap_count; 59 + u8 depmap[11/*LSF_LSB_DEPMAP_SIZE*/ * 2 * 4]; 60 + u8 kdf[16]; 61 + }; 62 + 63 + struct lsb_header_tail { 64 + u32 ucode_off; 65 + u32 ucode_size; 66 + u32 data_size; 67 + u32 
bl_code_size; 68 + u32 bl_imem_off; 69 + u32 bl_data_off; 70 + u32 bl_data_size; 71 + u32 app_code_off; 72 + u32 app_code_size; 73 + u32 app_data_off; 74 + u32 app_data_size; 75 + u32 flags; 76 + }; 77 + 78 + struct lsb_header { 79 + struct lsf_signature signature; 80 + struct lsb_header_tail tail; 81 + }; 82 + 83 + void lsb_header_dump(struct nvkm_subdev *, struct lsb_header *); 84 + 85 + struct lsb_header_v1 { 86 + struct lsf_signature_v1 signature; 87 + struct lsb_header_tail tail; 88 + }; 89 + 90 + void lsb_header_v1_dump(struct nvkm_subdev *, struct lsb_header_v1 *); 91 + 92 + struct flcn_acr_desc { 93 + union { 94 + u8 reserved_dmem[0x200]; 95 + u32 signatures[4]; 96 + } ucode_reserved_space; 97 + u32 wpr_region_id; 98 + u32 wpr_offset; 99 + u32 mmu_mem_range; 100 + struct { 101 + u32 no_regions; 102 + struct { 103 + u32 start_addr; 104 + u32 end_addr; 105 + u32 region_id; 106 + u32 read_mask; 107 + u32 write_mask; 108 + u32 client_mask; 109 + } region_props[2]; 110 + } regions; 111 + u32 ucode_blob_size; 112 + u64 ucode_blob_base __aligned(8); 113 + struct { 114 + u32 vpr_enabled; 115 + u32 vpr_start; 116 + u32 vpr_end; 117 + u32 hdcp_policies; 118 + } vpr_desc; 119 + }; 120 + 121 + void flcn_acr_desc_dump(struct nvkm_subdev *, struct flcn_acr_desc *); 122 + 123 + struct flcn_acr_desc_v1 { 124 + u8 reserved_dmem[0x200]; 125 + u32 signatures[4]; 126 + u32 wpr_region_id; 127 + u32 wpr_offset; 128 + u32 mmu_memory_range; 129 + struct { 130 + u32 no_regions; 131 + struct { 132 + u32 start_addr; 133 + u32 end_addr; 134 + u32 region_id; 135 + u32 read_mask; 136 + u32 write_mask; 137 + u32 client_mask; 138 + u32 shadow_mem_start_addr; 139 + } region_props[2]; 140 + } regions; 141 + u32 ucode_blob_size; 142 + u64 ucode_blob_base __aligned(8); 143 + struct { 144 + u32 vpr_enabled; 145 + u32 vpr_start; 146 + u32 vpr_end; 147 + u32 hdcp_policies; 148 + } vpr_desc; 149 + }; 150 + 151 + void flcn_acr_desc_v1_dump(struct nvkm_subdev *, struct flcn_acr_desc_v1 *); 152 + 
#endif
+97
drivers/gpu/drm/nouveau/include/nvfw/flcn.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVFW_FLCN_H__ 3 + #define __NVFW_FLCN_H__ 4 + #include <core/os.h> 5 + struct nvkm_subdev; 6 + 7 + struct loader_config { 8 + u32 dma_idx; 9 + u32 code_dma_base; 10 + u32 code_size_total; 11 + u32 code_size_to_load; 12 + u32 code_entry_point; 13 + u32 data_dma_base; 14 + u32 data_size; 15 + u32 overlay_dma_base; 16 + u32 argc; 17 + u32 argv; 18 + u32 code_dma_base1; 19 + u32 data_dma_base1; 20 + u32 overlay_dma_base1; 21 + }; 22 + 23 + void 24 + loader_config_dump(struct nvkm_subdev *, const struct loader_config *); 25 + 26 + struct loader_config_v1 { 27 + u32 reserved; 28 + u32 dma_idx; 29 + u64 code_dma_base; 30 + u32 code_size_total; 31 + u32 code_size_to_load; 32 + u32 code_entry_point; 33 + u64 data_dma_base; 34 + u32 data_size; 35 + u64 overlay_dma_base; 36 + u32 argc; 37 + u32 argv; 38 + } __packed; 39 + 40 + void 41 + loader_config_v1_dump(struct nvkm_subdev *, const struct loader_config_v1 *); 42 + 43 + struct flcn_bl_dmem_desc { 44 + u32 reserved[4]; 45 + u32 signature[4]; 46 + u32 ctx_dma; 47 + u32 code_dma_base; 48 + u32 non_sec_code_off; 49 + u32 non_sec_code_size; 50 + u32 sec_code_off; 51 + u32 sec_code_size; 52 + u32 code_entry_point; 53 + u32 data_dma_base; 54 + u32 data_size; 55 + u32 code_dma_base1; 56 + u32 data_dma_base1; 57 + }; 58 + 59 + void 60 + flcn_bl_dmem_desc_dump(struct nvkm_subdev *, const struct flcn_bl_dmem_desc *); 61 + 62 + struct flcn_bl_dmem_desc_v1 { 63 + u32 reserved[4]; 64 + u32 signature[4]; 65 + u32 ctx_dma; 66 + u64 code_dma_base; 67 + u32 non_sec_code_off; 68 + u32 non_sec_code_size; 69 + u32 sec_code_off; 70 + u32 sec_code_size; 71 + u32 code_entry_point; 72 + u64 data_dma_base; 73 + u32 data_size; 74 + } __packed; 75 + 76 + void flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *, 77 + const struct flcn_bl_dmem_desc_v1 *); 78 + 79 + struct flcn_bl_dmem_desc_v2 { 80 + u32 reserved[4]; 81 + u32 signature[4]; 82 + u32 ctx_dma; 83 + u64 code_dma_base; 84 + u32 
non_sec_code_off; 85 + u32 non_sec_code_size; 86 + u32 sec_code_off; 87 + u32 sec_code_size; 88 + u32 code_entry_point; 89 + u64 data_dma_base; 90 + u32 data_size; 91 + u32 argc; 92 + u32 argv; 93 + } __packed; 94 + 95 + void flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *, 96 + const struct flcn_bl_dmem_desc_v2 *); 97 + #endif
+28
drivers/gpu/drm/nouveau/include/nvfw/fw.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVFW_FW_H__ 3 + #define __NVFW_FW_H__ 4 + #include <core/os.h> 5 + struct nvkm_subdev; 6 + 7 + struct nvfw_bin_hdr { 8 + u32 bin_magic; 9 + u32 bin_ver; 10 + u32 bin_size; 11 + u32 header_offset; 12 + u32 data_offset; 13 + u32 data_size; 14 + }; 15 + 16 + const struct nvfw_bin_hdr *nvfw_bin_hdr(struct nvkm_subdev *, const void *); 17 + 18 + struct nvfw_bl_desc { 19 + u32 start_tag; 20 + u32 dmem_load_off; 21 + u32 code_off; 22 + u32 code_size; 23 + u32 data_off; 24 + u32 data_size; 25 + }; 26 + 27 + const struct nvfw_bl_desc *nvfw_bl_desc(struct nvkm_subdev *, const void *); 28 + #endif
+31
drivers/gpu/drm/nouveau/include/nvfw/hs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVFW_HS_H__ 3 + #define __NVFW_HS_H__ 4 + #include <core/os.h> 5 + struct nvkm_subdev; 6 + 7 + struct nvfw_hs_header { 8 + u32 sig_dbg_offset; 9 + u32 sig_dbg_size; 10 + u32 sig_prod_offset; 11 + u32 sig_prod_size; 12 + u32 patch_loc; 13 + u32 patch_sig; 14 + u32 hdr_offset; 15 + u32 hdr_size; 16 + }; 17 + 18 + const struct nvfw_hs_header *nvfw_hs_header(struct nvkm_subdev *, const void *); 19 + 20 + struct nvfw_hs_load_header { 21 + u32 non_sec_code_off; 22 + u32 non_sec_code_size; 23 + u32 data_dma_base; 24 + u32 data_size; 25 + u32 num_apps; 26 + u32 apps[0]; 27 + }; 28 + 29 + const struct nvfw_hs_load_header * 30 + nvfw_hs_load_header(struct nvkm_subdev *, const void *); 31 + #endif
+53
drivers/gpu/drm/nouveau/include/nvfw/ls.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVFW_LS_H__ 3 + #define __NVFW_LS_H__ 4 + #include <core/os.h> 5 + struct nvkm_subdev; 6 + 7 + struct nvfw_ls_desc_head { 8 + u32 descriptor_size; 9 + u32 image_size; 10 + u32 tools_version; 11 + u32 app_version; 12 + char date[64]; 13 + u32 bootloader_start_offset; 14 + u32 bootloader_size; 15 + u32 bootloader_imem_offset; 16 + u32 bootloader_entry_point; 17 + u32 app_start_offset; 18 + u32 app_size; 19 + u32 app_imem_offset; 20 + u32 app_imem_entry; 21 + u32 app_dmem_offset; 22 + u32 app_resident_code_offset; 23 + u32 app_resident_code_size; 24 + u32 app_resident_data_offset; 25 + u32 app_resident_data_size; 26 + }; 27 + 28 + struct nvfw_ls_desc { 29 + struct nvfw_ls_desc_head head; 30 + u32 nb_overlays; 31 + struct { 32 + u32 start; 33 + u32 size; 34 + } load_ovl[64]; 35 + u32 compressed; 36 + }; 37 + 38 + const struct nvfw_ls_desc *nvfw_ls_desc(struct nvkm_subdev *, const void *); 39 + 40 + struct nvfw_ls_desc_v1 { 41 + struct nvfw_ls_desc_head head; 42 + u32 nb_imem_overlays; 43 + u32 nb_dmem_overlays; 44 + struct { 45 + u32 start; 46 + u32 size; 47 + } load_ovl[64]; 48 + u32 compressed; 49 + }; 50 + 51 + const struct nvfw_ls_desc_v1 * 52 + nvfw_ls_desc_v1(struct nvkm_subdev *, const void *); 53 + #endif
+98
drivers/gpu/drm/nouveau/include/nvfw/pmu.h
··· 1 + #ifndef __NVFW_PMU_H__ 2 + #define __NVFW_PMU_H__ 3 + 4 + struct nv_pmu_args { 5 + u32 reserved; 6 + u32 freq_hz; 7 + u32 trace_size; 8 + u32 trace_dma_base; 9 + u16 trace_dma_base1; 10 + u8 trace_dma_offset; 11 + u32 trace_dma_idx; 12 + bool secure_mode; 13 + bool raise_priv_sec; 14 + struct { 15 + u32 dma_base; 16 + u16 dma_base1; 17 + u8 dma_offset; 18 + u16 fb_size; 19 + u8 dma_idx; 20 + } gc6_ctx; 21 + u8 pad; 22 + }; 23 + 24 + #define NV_PMU_UNIT_INIT 0x07 25 + #define NV_PMU_UNIT_ACR 0x0a 26 + 27 + struct nv_pmu_init_msg { 28 + struct nv_falcon_msg hdr; 29 + #define NV_PMU_INIT_MSG_INIT 0x00 30 + u8 msg_type; 31 + 32 + u8 pad; 33 + u16 os_debug_entry_point; 34 + 35 + struct { 36 + u16 size; 37 + u16 offset; 38 + u8 index; 39 + u8 pad; 40 + } queue_info[5]; 41 + 42 + u16 sw_managed_area_offset; 43 + u16 sw_managed_area_size; 44 + }; 45 + 46 + struct nv_pmu_acr_cmd { 47 + struct nv_falcon_cmd hdr; 48 + #define NV_PMU_ACR_CMD_INIT_WPR_REGION 0x00 49 + #define NV_PMU_ACR_CMD_BOOTSTRAP_FALCON 0x01 50 + #define NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS 0x03 51 + u8 cmd_type; 52 + }; 53 + 54 + struct nv_pmu_acr_msg { 55 + struct nv_falcon_cmd hdr; 56 + u8 msg_type; 57 + }; 58 + 59 + struct nv_pmu_acr_init_wpr_region_cmd { 60 + struct nv_pmu_acr_cmd cmd; 61 + u32 region_id; 62 + u32 wpr_offset; 63 + }; 64 + 65 + struct nv_pmu_acr_init_wpr_region_msg { 66 + struct nv_pmu_acr_msg msg; 67 + u32 error_code; 68 + }; 69 + 70 + struct nv_pmu_acr_bootstrap_falcon_cmd { 71 + struct nv_pmu_acr_cmd cmd; 72 + #define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000 73 + #define NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001 74 + u32 flags; 75 + u32 falcon_id; 76 + }; 77 + 78 + struct nv_pmu_acr_bootstrap_falcon_msg { 79 + struct nv_pmu_acr_msg msg; 80 + u32 falcon_id; 81 + }; 82 + 83 + struct nv_pmu_acr_bootstrap_multiple_falcons_cmd { 84 + struct nv_pmu_acr_cmd cmd; 85 + #define NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES 0x00000000 86 + #define 
NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_NO 0x00000001 87 + u32 flags; 88 + u32 falcon_mask; 89 + u32 use_va_mask; 90 + u32 wpr_lo; 91 + u32 wpr_hi; 92 + }; 93 + 94 + struct nv_pmu_acr_bootstrap_multiple_falcons_msg { 95 + struct nv_pmu_acr_msg msg; 96 + u32 falcon_mask; 97 + }; 98 + #endif
+60
drivers/gpu/drm/nouveau/include/nvfw/sec2.h
··· 1 + #ifndef __NVFW_SEC2_H__ 2 + #define __NVFW_SEC2_H__ 3 + 4 + struct nv_sec2_args { 5 + u32 freq_hz; 6 + u32 falc_trace_size; 7 + u32 falc_trace_dma_base; 8 + u32 falc_trace_dma_idx; 9 + bool secure_mode; 10 + }; 11 + 12 + #define NV_SEC2_UNIT_INIT 0x01 13 + #define NV_SEC2_UNIT_ACR 0x08 14 + 15 + struct nv_sec2_init_msg { 16 + struct nv_falcon_msg hdr; 17 + #define NV_SEC2_INIT_MSG_INIT 0x00 18 + u8 msg_type; 19 + 20 + u8 num_queues; 21 + u16 os_debug_entry_point; 22 + 23 + struct { 24 + u32 offset; 25 + u16 size; 26 + u8 index; 27 + #define NV_SEC2_INIT_MSG_QUEUE_ID_CMDQ 0x00 28 + #define NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ 0x01 29 + u8 id; 30 + } queue_info[2]; 31 + 32 + u32 sw_managed_area_offset; 33 + u16 sw_managed_area_size; 34 + }; 35 + 36 + struct nv_sec2_acr_cmd { 37 + struct nv_falcon_cmd hdr; 38 + #define NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON 0x00 39 + u8 cmd_type; 40 + }; 41 + 42 + struct nv_sec2_acr_msg { 43 + struct nv_falcon_cmd hdr; 44 + u8 msg_type; 45 + }; 46 + 47 + struct nv_sec2_acr_bootstrap_falcon_cmd { 48 + struct nv_sec2_acr_cmd cmd; 49 + #define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES 0x00000000 50 + #define NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_NO 0x00000001 51 + u32 flags; 52 + u32 falcon_id; 53 + }; 54 + 55 + struct nv_sec2_acr_bootstrap_falcon_msg { 56 + struct nv_sec2_acr_msg msg; 57 + u32 error_code; 58 + u32 falcon_id; 59 + }; 60 + #endif
+3
drivers/gpu/drm/nouveau/include/nvif/class.h
··· 166 166 167 167 #define VOLTA_A /* cl9097.h */ 0x0000c397 168 168 169 + #define TURING_A /* cl9097.h */ 0x0000c597 170 + 169 171 #define NV74_BSP 0x000074b0 170 172 171 173 #define GT212_MSVLD 0x000085b1 ··· 209 207 #define PASCAL_COMPUTE_A 0x0000c0c0 210 208 #define PASCAL_COMPUTE_B 0x0000c1c0 211 209 #define VOLTA_COMPUTE_A 0x0000c3c0 210 + #define TURING_COMPUTE_A 0x0000c5c0 212 211 213 212 #define NV74_CIPHER 0x000074c1 214 213 #endif
+1 -1
drivers/gpu/drm/nouveau/include/nvif/if0008.h
··· 35 35 36 36 struct nvif_mmu_kind_v0 { 37 37 __u8 version; 38 - __u8 pad01[1]; 38 + __u8 kind_inv; 39 39 __u16 count; 40 40 __u8 data[]; 41 41 };
+2 -2
drivers/gpu/drm/nouveau/include/nvif/mmu.h
··· 7 7 u8 dmabits; 8 8 u8 heap_nr; 9 9 u8 type_nr; 10 + u8 kind_inv; 10 11 u16 kind_nr; 11 12 s32 mem; 12 13 ··· 37 36 static inline bool 38 37 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) 39 38 { 40 - const u8 invalid = mmu->kind_nr - 1; 41 39 if (kind) { 42 - if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid) 40 + if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) 43 41 return false; 44 42 } 45 43 return true;
+5 -5
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
··· 23 23 NVKM_SUBDEV_MMU, 24 24 NVKM_SUBDEV_BAR, 25 25 NVKM_SUBDEV_FAULT, 26 + NVKM_SUBDEV_ACR, 26 27 NVKM_SUBDEV_PMU, 27 28 NVKM_SUBDEV_VOLT, 28 29 NVKM_SUBDEV_ICCSENSE, 29 30 NVKM_SUBDEV_THERM, 30 31 NVKM_SUBDEV_CLK, 31 32 NVKM_SUBDEV_GSP, 32 - NVKM_SUBDEV_SECBOOT, 33 33 34 34 NVKM_ENGINE_BSP, 35 35 ··· 129 129 struct notifier_block nb; 130 130 } acpi; 131 131 132 + struct nvkm_acr *acr; 132 133 struct nvkm_bar *bar; 133 134 struct nvkm_bios *bios; 134 135 struct nvkm_bus *bus; ··· 150 149 struct nvkm_subdev *mxm; 151 150 struct nvkm_pci *pci; 152 151 struct nvkm_pmu *pmu; 153 - struct nvkm_secboot *secboot; 154 152 struct nvkm_therm *therm; 155 153 struct nvkm_timer *timer; 156 154 struct nvkm_top *top; ··· 169 169 struct nvkm_engine *mspdec; 170 170 struct nvkm_engine *msppp; 171 171 struct nvkm_engine *msvld; 172 - struct nvkm_engine *nvenc[3]; 172 + struct nvkm_nvenc *nvenc[3]; 173 173 struct nvkm_nvdec *nvdec[3]; 174 174 struct nvkm_pm *pm; 175 175 struct nvkm_engine *sec; ··· 202 202 struct nvkm_device_chip { 203 203 const char *name; 204 204 205 + int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **); 205 206 int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **); 206 207 int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **); 207 208 int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **); ··· 223 222 int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **); 224 223 int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **); 225 224 int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **); 226 - int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **); 227 225 int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **); 228 226 int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **); 229 227 int (*top )(struct nvkm_device *, int idx, struct nvkm_top **); ··· 242 242 int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **); 243 243 int (*msppp )(struct 
nvkm_device *, int idx, struct nvkm_engine **); 244 244 int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **); 245 - int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **); 245 + int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **); 246 246 int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **); 247 247 int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **); 248 248 int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+77
drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
··· 1 + #ifndef __NVKM_FALCON_H__ 2 + #define __NVKM_FALCON_H__ 3 + #include <engine/falcon.h> 4 + 5 + int nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *owner, 6 + const char *name, u32 addr, struct nvkm_falcon *); 7 + void nvkm_falcon_dtor(struct nvkm_falcon *); 8 + 9 + void nvkm_falcon_v1_load_imem(struct nvkm_falcon *, 10 + void *, u32, u32, u16, u8, bool); 11 + void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8); 12 + void nvkm_falcon_v1_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *); 13 + void nvkm_falcon_v1_bind_context(struct nvkm_falcon *, struct nvkm_memory *); 14 + int nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *, u32); 15 + int nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *, u32); 16 + void nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *, u32 start_addr); 17 + void nvkm_falcon_v1_start(struct nvkm_falcon *); 18 + int nvkm_falcon_v1_enable(struct nvkm_falcon *); 19 + void nvkm_falcon_v1_disable(struct nvkm_falcon *); 20 + 21 + void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *); 22 + int gp102_sec2_flcn_enable(struct nvkm_falcon *); 23 + 24 + #define FLCN_PRINTK(t,f,fmt,a...) do { \ 25 + if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \ 26 + nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \ 27 + else \ 28 + nvkm_##t((f)->owner, fmt"\n", ##a); \ 29 + } while(0) 30 + #define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a) 31 + #define FLCN_ERR(f,fmt,a...) 
FLCN_PRINTK(error, (f), fmt, ##a) 32 + 33 + /** 34 + * struct nv_falcon_msg - header for all messages 35 + * 36 + * @unit_id: id of firmware process that sent the message 37 + * @size: total size of message 38 + * @ctrl_flags: control flags 39 + * @seq_id: used to match a message from its corresponding command 40 + */ 41 + struct nv_falcon_msg { 42 + u8 unit_id; 43 + u8 size; 44 + u8 ctrl_flags; 45 + u8 seq_id; 46 + }; 47 + 48 + #define nv_falcon_cmd nv_falcon_msg 49 + #define NV_FALCON_CMD_UNIT_ID_REWIND 0x00 50 + 51 + struct nvkm_falcon_qmgr; 52 + int nvkm_falcon_qmgr_new(struct nvkm_falcon *, struct nvkm_falcon_qmgr **); 53 + void nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **); 54 + 55 + typedef int 56 + (*nvkm_falcon_qmgr_callback)(void *priv, struct nv_falcon_msg *); 57 + 58 + struct nvkm_falcon_cmdq; 59 + int nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *, const char *name, 60 + struct nvkm_falcon_cmdq **); 61 + void nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **); 62 + void nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *, 63 + u32 index, u32 offset, u32 size); 64 + void nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *); 65 + int nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *, struct nv_falcon_cmd *, 66 + nvkm_falcon_qmgr_callback, void *priv, 67 + unsigned long timeout_jiffies); 68 + 69 + struct nvkm_falcon_msgq; 70 + int nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *, const char *name, 71 + struct nvkm_falcon_msgq **); 72 + void nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **); 73 + void nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *, 74 + u32 index, u32 offset, u32 size); 75 + int nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *, void *, u32 size); 76 + void nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *); 77 + #endif
+47 -4
drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 2 #ifndef __NVKM_FIRMWARE_H__ 3 3 #define __NVKM_FIRMWARE_H__ 4 + #include <core/option.h> 4 5 #include <core/subdev.h> 5 6 6 - int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname, 7 - int min_version, int max_version, 8 - const struct firmware **); 9 - int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, 7 + int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname, int ver, 10 8 const struct firmware **); 11 9 void nvkm_firmware_put(const struct firmware *); 10 + 11 + int nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *path, 12 + const char *name, int ver, struct nvkm_blob *); 13 + int nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *path, 14 + const char *name, int ver, 15 + const struct firmware **); 16 + 17 + #define nvkm_firmware_load(s,l,o,p...) ({ \ 18 + struct nvkm_subdev *_s = (s); \ 19 + const char *_opts = (o); \ 20 + char _option[32]; \ 21 + typeof(l[0]) *_list = (l), *_next, *_fwif = NULL; \ 22 + int _ver, _fwv, _ret = 0; \ 23 + \ 24 + snprintf(_option, sizeof(_option), "Nv%sFw", _opts); \ 25 + _ver = nvkm_longopt(_s->device->cfgopt, _option, -2); \ 26 + if (_ver >= -1) { \ 27 + for (_next = _list; !_fwif && _next->load; _next++) { \ 28 + if (_next->version == _ver) \ 29 + _fwif = _next; \ 30 + } \ 31 + _ret = _fwif ? 0 : -EINVAL; \ 32 + } \ 33 + \ 34 + if (_ret == 0) { \ 35 + snprintf(_option, sizeof(_option), "Nv%sFwVer", _opts); \ 36 + _fwv = _fwif ? _fwif->version : -1; \ 37 + _ver = nvkm_longopt(_s->device->cfgopt, _option, _fwv); \ 38 + for (_next = _fwif ? _fwif : _list; _next->load; _next++) { \ 39 + _fwv = (_ver >= 0) ? 
_ver : _next->version; \ 40 + _ret = _next->load(p, _fwv, _next); \ 41 + if (_ret == 0 || _ver >= 0) { \ 42 + _fwif = _next; \ 43 + break; \ 44 + } \ 45 + } \ 46 + } \ 47 + \ 48 + if (_ret) { \ 49 + nvkm_error(_s, "failed to load firmware\n"); \ 50 + _fwif = ERR_PTR(_ret); \ 51 + } \ 52 + \ 53 + _fwif; \ 54 + }) 12 55 #endif
+16
drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
··· 84 84 nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \ 85 85 } while(0) 86 86 87 + #define nvkm_robj(o,a,p,s) do { \ 88 + u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \ 89 + while (_size--) { \ 90 + *(_data++) = nvkm_ro32((o), _addr); \ 91 + _addr += 4; \ 92 + } \ 93 + } while(0) 94 + 95 + #define nvkm_wobj(o,a,p,s) do { \ 96 + u32 _addr = (a), _size = (s) >> 2, *_data = (void *)(p); \ 97 + while (_size--) { \ 98 + nvkm_wo32((o), _addr, *(_data++)); \ 99 + _addr += 4; \ 100 + } \ 101 + } while(0) 102 + 87 103 #define nvkm_fill(t,s,o,a,d,c) do { \ 88 104 u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \ 89 105 u##t __iomem *_m = nvkm_kmap(o); \
-43
drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #ifndef __NVKM_CORE_MSGQUEUE_H 24 - #define __NVKM_CORE_MSGQUEUE_H 25 - #include <subdev/secboot.h> 26 - struct nvkm_msgqueue; 27 - 28 - /* Hopefully we will never have firmware arguments larger than that... 
*/ 29 - #define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100 30 - 31 - int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *, 32 - struct nvkm_msgqueue **); 33 - void nvkm_msgqueue_del(struct nvkm_msgqueue **); 34 - void nvkm_msgqueue_recv(struct nvkm_msgqueue *); 35 - int nvkm_msgqueue_reinit(struct nvkm_msgqueue *); 36 - 37 - /* useful if we run a NVIDIA-signed firmware */ 38 - void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *); 39 - 40 - /* interface to ACR unit running on falcon (NVIDIA signed firmware) */ 41 - int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long); 42 - 43 - #endif
+13
drivers/gpu/drm/nouveau/include/nvkm/core/os.h
··· 21 21 iowrite32_native(lower_32_bits(_v), &_p[0]); \ 22 22 iowrite32_native(upper_32_bits(_v), &_p[1]); \ 23 23 } while(0) 24 + 25 + struct nvkm_blob { 26 + void *data; 27 + u32 size; 28 + }; 29 + 30 + static inline void 31 + nvkm_blob_dtor(struct nvkm_blob *blob) 32 + { 33 + kfree(blob->data); 34 + blob->data = NULL; 35 + blob->size = 0; 36 + } 24 37 #endif
+16 -4
drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 - #ifndef __NVKM_FALCON_H__ 3 - #define __NVKM_FALCON_H__ 2 + #ifndef __NVKM_FLCNEN_H__ 3 + #define __NVKM_FLCNEN_H__ 4 4 #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine) 5 5 #include <core/engine.h> 6 6 struct nvkm_fifo_chan; ··· 23 23 24 24 struct mutex mutex; 25 25 struct mutex dmem_mutex; 26 + bool oneinit; 27 + 26 28 const struct nvkm_subdev *user; 27 29 28 30 u8 version; 29 31 u8 secret; 30 32 bool debug; 31 - bool has_emem; 32 33 33 34 struct nvkm_memory *core; 34 35 bool external; ··· 77 76 } data; 78 77 void (*init)(struct nvkm_falcon *); 79 78 void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *); 79 + 80 + u32 debug; 81 + u32 fbif; 82 + 80 83 void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool); 81 84 void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8); 82 85 void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *); 86 + u32 emem_addr; 83 87 void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *); 84 88 int (*wait_for_halt)(struct nvkm_falcon *, u32); 85 89 int (*clear_interrupt)(struct nvkm_falcon *, u32); ··· 92 86 void (*start)(struct nvkm_falcon *); 93 87 int (*enable)(struct nvkm_falcon *falcon); 94 88 void (*disable)(struct nvkm_falcon *falcon); 89 + int (*reset)(struct nvkm_falcon *); 90 + 91 + struct { 92 + u32 head; 93 + u32 tail; 94 + u32 stride; 95 + } cmdq, msgq; 95 96 96 97 struct nvkm_sclass sclass[]; 97 98 }; ··· 135 122 int nvkm_falcon_enable(struct nvkm_falcon *); 136 123 void nvkm_falcon_disable(struct nvkm_falcon *); 137 124 int nvkm_falcon_reset(struct nvkm_falcon *); 138 - 139 125 #endif
+2
drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
··· 50 50 int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 51 51 int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 52 52 int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 53 + int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 53 54 int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 54 55 int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 56 + int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 55 57 #endif
+4 -4
drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
··· 3 3 #define __NVKM_NVDEC_H__ 4 4 #define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine) 5 5 #include <core/engine.h> 6 + #include <core/falcon.h> 6 7 7 8 struct nvkm_nvdec { 9 + const struct nvkm_nvdec_func *func; 8 10 struct nvkm_engine engine; 9 - u32 addr; 10 - 11 - struct nvkm_falcon *falcon; 11 + struct nvkm_falcon falcon; 12 12 }; 13 13 14 - int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **); 14 + int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **); 15 15 #endif
+10
drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 2 #ifndef __NVKM_NVENC_H__ 3 3 #define __NVKM_NVENC_H__ 4 + #define nvkm_nvenc(p) container_of((p), struct nvkm_nvenc, engine) 4 5 #include <core/engine.h> 6 + #include <core/falcon.h> 7 + 8 + struct nvkm_nvenc { 9 + const struct nvkm_nvenc_func *func; 10 + struct nvkm_engine engine; 11 + struct nvkm_falcon falcon; 12 + }; 13 + 14 + int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **); 5 15 #endif
+10 -3
drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 2 #ifndef __NVKM_SEC2_H__ 3 3 #define __NVKM_SEC2_H__ 4 + #define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine) 4 5 #include <core/engine.h> 6 + #include <core/falcon.h> 5 7 6 8 struct nvkm_sec2 { 9 + const struct nvkm_sec2_func *func; 7 10 struct nvkm_engine engine; 8 - u32 addr; 11 + struct nvkm_falcon falcon; 9 12 10 - struct nvkm_falcon *falcon; 11 - struct nvkm_msgqueue *queue; 13 + struct nvkm_falcon_qmgr *qmgr; 14 + struct nvkm_falcon_cmdq *cmdq; 15 + struct nvkm_falcon_msgq *msgq; 16 + 12 17 struct work_struct work; 18 + bool initmsg_received; 13 19 }; 14 20 15 21 int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); 22 + int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); 16 23 int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); 17 24 #endif
+126
drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVKM_ACR_H__ 3 + #define __NVKM_ACR_H__ 4 + #define nvkm_acr(p) container_of((p), struct nvkm_acr, subdev) 5 + #include <core/subdev.h> 6 + #include <core/falcon.h> 7 + 8 + enum nvkm_acr_lsf_id { 9 + NVKM_ACR_LSF_PMU = 0, 10 + NVKM_ACR_LSF_GSPLITE = 1, 11 + NVKM_ACR_LSF_FECS = 2, 12 + NVKM_ACR_LSF_GPCCS = 3, 13 + NVKM_ACR_LSF_NVDEC = 4, 14 + NVKM_ACR_LSF_SEC2 = 7, 15 + NVKM_ACR_LSF_MINION = 10, 16 + NVKM_ACR_LSF_NUM 17 + }; 18 + 19 + static inline const char * 20 + nvkm_acr_lsf_id(enum nvkm_acr_lsf_id id) 21 + { 22 + switch (id) { 23 + case NVKM_ACR_LSF_PMU : return "pmu"; 24 + case NVKM_ACR_LSF_GSPLITE: return "gsplite"; 25 + case NVKM_ACR_LSF_FECS : return "fecs"; 26 + case NVKM_ACR_LSF_GPCCS : return "gpccs"; 27 + case NVKM_ACR_LSF_NVDEC : return "nvdec"; 28 + case NVKM_ACR_LSF_SEC2 : return "sec2"; 29 + case NVKM_ACR_LSF_MINION : return "minion"; 30 + default: 31 + return "unknown"; 32 + } 33 + } 34 + 35 + struct nvkm_acr { 36 + const struct nvkm_acr_func *func; 37 + struct nvkm_subdev subdev; 38 + 39 + struct list_head hsfw, hsf; 40 + struct list_head lsfw, lsf; 41 + 42 + struct nvkm_memory *wpr; 43 + u64 wpr_start; 44 + u64 wpr_end; 45 + u64 shadow_start; 46 + 47 + struct nvkm_memory *inst; 48 + struct nvkm_vmm *vmm; 49 + 50 + bool done; 51 + 52 + const struct firmware *wpr_fw; 53 + bool wpr_comp; 54 + u64 wpr_prev; 55 + }; 56 + 57 + bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id); 58 + int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask); 59 + 60 + int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **); 61 + int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); 62 + int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **); 63 + int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **); 64 + int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); 65 + int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr 
**); 66 + 67 + struct nvkm_acr_lsfw { 68 + const struct nvkm_acr_lsf_func *func; 69 + struct nvkm_falcon *falcon; 70 + enum nvkm_acr_lsf_id id; 71 + 72 + struct list_head head; 73 + 74 + struct nvkm_blob img; 75 + 76 + const struct firmware *sig; 77 + 78 + u32 bootloader_size; 79 + u32 bootloader_imem_offset; 80 + 81 + u32 app_size; 82 + u32 app_start_offset; 83 + u32 app_imem_entry; 84 + u32 app_resident_code_offset; 85 + u32 app_resident_code_size; 86 + u32 app_resident_data_offset; 87 + u32 app_resident_data_size; 88 + 89 + u32 ucode_size; 90 + u32 data_size; 91 + 92 + struct { 93 + u32 lsb; 94 + u32 img; 95 + u32 bld; 96 + } offset; 97 + u32 bl_data_size; 98 + }; 99 + 100 + struct nvkm_acr_lsf_func { 101 + /* The (currently) map directly to LSB header flags. */ 102 + #define NVKM_ACR_LSF_LOAD_CODE_AT_0 0x00000001 103 + #define NVKM_ACR_LSF_DMACTL_REQ_CTX 0x00000004 104 + #define NVKM_ACR_LSF_FORCE_PRIV_LOAD 0x00000008 105 + u32 flags; 106 + u32 bld_size; 107 + void (*bld_write)(struct nvkm_acr *, u32 bld, struct nvkm_acr_lsfw *); 108 + void (*bld_patch)(struct nvkm_acr *, u32 bld, s64 adjust); 109 + int (*boot)(struct nvkm_falcon *); 110 + int (*bootstrap_falcon)(struct nvkm_falcon *, enum nvkm_acr_lsf_id); 111 + int (*bootstrap_multiple_falcons)(struct nvkm_falcon *, u32 mask); 112 + }; 113 + 114 + int 115 + nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *, struct nvkm_falcon *, 116 + enum nvkm_acr_lsf_id, const char *path, 117 + int ver, const struct nvkm_acr_lsf_func *); 118 + int 119 + nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *, struct nvkm_falcon *, 120 + enum nvkm_acr_lsf_id, const char *path, 121 + int ver, const struct nvkm_acr_lsf_func *); 122 + int 123 + nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *, struct nvkm_falcon *, 124 + enum nvkm_acr_lsf_id, const char *path, 125 + int ver, const struct nvkm_acr_lsf_func *); 126 + #endif
+1
drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
··· 31 31 }; 32 32 33 33 int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); 34 + int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **); 34 35 int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); 35 36 int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **); 36 37 #endif
+2
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
··· 33 33 const struct nvkm_fb_func *func; 34 34 struct nvkm_subdev subdev; 35 35 36 + struct nvkm_blob vpr_scrubber; 37 + 36 38 struct nvkm_ram *ram; 37 39 struct nvkm_mm tags; 38 40
+2 -3
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
··· 2 2 #define __NVKM_GSP_H__ 3 3 #define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev) 4 4 #include <core/subdev.h> 5 + #include <core/falcon.h> 5 6 6 7 struct nvkm_gsp { 7 8 struct nvkm_subdev subdev; 8 - u32 addr; 9 - 10 - struct nvkm_falcon *falcon; 9 + struct nvkm_falcon falcon; 11 10 }; 12 11 13 12 int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **);
+1
drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
··· 40 40 int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 41 41 int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 42 42 int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 43 + int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 43 44 #endif
+11 -3
drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
··· 2 2 #ifndef __NVKM_PMU_H__ 3 3 #define __NVKM_PMU_H__ 4 4 #include <core/subdev.h> 5 - #include <engine/falcon.h> 5 + #include <core/falcon.h> 6 6 7 7 struct nvkm_pmu { 8 8 const struct nvkm_pmu_func *func; 9 9 struct nvkm_subdev subdev; 10 - struct nvkm_falcon *falcon; 11 - struct nvkm_msgqueue *queue; 10 + struct nvkm_falcon falcon; 11 + 12 + struct nvkm_falcon_qmgr *qmgr; 13 + struct nvkm_falcon_cmdq *hpq; 14 + struct nvkm_falcon_cmdq *lpq; 15 + struct nvkm_falcon_msgq *msgq; 16 + bool initmsg_received; 17 + 18 + struct completion wpr_ready; 12 19 13 20 struct { 14 21 u32 base; ··· 50 43 int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); 51 44 int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); 52 45 int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); 46 + int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); 53 47 54 48 /* interface to MEMX process running on PMU */ 55 49 struct nvkm_memx;
+3 -2
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1162 1162 void 1163 1163 nouveau_bo_move_init(struct nouveau_drm *drm) 1164 1164 { 1165 - static const struct { 1165 + static const struct _method_table { 1166 1166 const char *name; 1167 1167 int engine; 1168 1168 s32 oclass; ··· 1192 1192 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, 1193 1193 {}, 1194 1194 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, 1195 - }, *mthd = _methods; 1195 + }; 1196 + const struct _method_table *mthd = _methods; 1196 1197 const char *name = "CPU"; 1197 1198 int ret; 1198 1199
+2 -2
drivers/gpu/drm/nouveau/nouveau_dmem.c
··· 635 635 unsigned long c, i; 636 636 int ret = -ENOMEM; 637 637 638 - args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL); 638 + args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL); 639 639 if (!args.src) 640 640 goto out; 641 - args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL); 641 + args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL); 642 642 if (!args.dst) 643 643 goto out_free_src; 644 644
+1 -2
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 715 715 void 716 716 nouveau_drm_device_remove(struct drm_device *dev) 717 717 { 718 - struct pci_dev *pdev = dev->pdev; 719 718 struct nouveau_drm *drm = nouveau_drm(dev); 720 719 struct nvkm_client *client; 721 720 struct nvkm_device *device; ··· 726 727 device = nvkm_device_find(client->device); 727 728 728 729 nouveau_drm_device_fini(dev); 729 - pci_disable_device(pdev); 730 730 drm_dev_put(dev); 731 731 nvkm_device_del(&device); 732 732 } ··· 736 738 struct drm_device *dev = pci_get_drvdata(pdev); 737 739 738 740 nouveau_drm_device_remove(dev); 741 + pci_disable_device(pdev); 739 742 } 740 743 741 744 static int
+1 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 156 156 157 157 fence = list_entry(fctx->pending.next, typeof(*fence), head); 158 158 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); 159 - if (nouveau_fence_update(fence->channel, fctx)) 159 + if (nouveau_fence_update(chan, fctx)) 160 160 ret = NVIF_NOTIFY_DROP; 161 161 } 162 162 spin_unlock_irqrestore(&fctx->lock, flags);
+1 -1
drivers/gpu/drm/nouveau/nouveau_hwmon.c
··· 741 741 special_groups[i++] = &pwm_fan_sensor_group; 742 742 } 743 743 744 - special_groups[i] = 0; 744 + special_groups[i] = NULL; 745 745 hwmon_dev = hwmon_device_register_with_info(dev->dev, "nouveau", dev, 746 746 &nouveau_chip_info, 747 747 special_groups);
-4
drivers/gpu/drm/nouveau/nouveau_ttm.c
··· 63 63 { 64 64 struct nouveau_bo *nvbo = nouveau_bo(bo); 65 65 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 66 - struct nouveau_mem *mem; 67 66 int ret; 68 67 69 68 if (drm->client.device.info.ram_size == 0) 70 69 return -ENOMEM; 71 70 72 71 ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); 73 - mem = nouveau_mem(reg); 74 72 if (ret) 75 73 return ret; 76 74 ··· 101 103 { 102 104 struct nouveau_bo *nvbo = nouveau_bo(bo); 103 105 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 104 - struct nouveau_mem *mem; 105 106 int ret; 106 107 107 108 ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg); 108 - mem = nouveau_mem(reg); 109 109 if (ret) 110 110 return ret; 111 111
+1
drivers/gpu/drm/nouveau/nvif/mmu.c
··· 121 121 kind, argc); 122 122 if (ret == 0) 123 123 memcpy(mmu->kind, kind->data, kind->count); 124 + mmu->kind_inv = kind->kind_inv; 124 125 kfree(kind); 125 126 } 126 127
+1
drivers/gpu/drm/nouveau/nvkm/Kbuild
··· 1 1 # SPDX-License-Identifier: MIT 2 2 include $(src)/nvkm/core/Kbuild 3 + include $(src)/nvkm/nvfw/Kbuild 3 4 include $(src)/nvkm/falcon/Kbuild 4 5 include $(src)/nvkm/subdev/Kbuild 5 6 include $(src)/nvkm/engine/Kbuild
+45 -22
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
··· 22 22 #include <core/device.h> 23 23 #include <core/firmware.h> 24 24 25 + int 26 + nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base, 27 + const char *name, int ver, const struct firmware **pfw) 28 + { 29 + char path[64]; 30 + int ret; 31 + 32 + snprintf(path, sizeof(path), "%s%s", base, name); 33 + ret = nvkm_firmware_get(subdev, path, ver, pfw); 34 + if (ret < 0) 35 + return ret; 36 + 37 + return 0; 38 + } 39 + 40 + int 41 + nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base, 42 + const char *name, int ver, struct nvkm_blob *blob) 43 + { 44 + const struct firmware *fw; 45 + int ret; 46 + 47 + ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw); 48 + if (ret == 0) { 49 + blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL); 50 + blob->size = fw->size; 51 + nvkm_firmware_put(fw); 52 + if (!blob->data) 53 + return -ENOMEM; 54 + } 55 + 56 + return ret; 57 + } 58 + 25 59 /** 26 60 * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory 27 61 * @subdev subdevice that will use that firmware ··· 66 32 * Firmware files released by NVIDIA will always follow this format. 
67 33 */ 68 34 int 69 - nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname, 70 - int min_version, int max_version, 71 - const struct firmware **fw) 35 + nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver, 36 + const struct firmware **fw) 72 37 { 73 38 struct nvkm_device *device = subdev->device; 74 39 char f[64]; ··· 83 50 cname[i] = tolower(cname[i]); 84 51 } 85 52 86 - for (i = max_version; i >= min_version; i--) { 87 - if (i != 0) 88 - snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i); 89 - else 90 - snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); 53 + if (ver != 0) 54 + snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver); 55 + else 56 + snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); 91 57 92 - if (!firmware_request_nowarn(fw, f, device->dev)) { 93 - nvkm_debug(subdev, "firmware \"%s\" loaded\n", f); 94 - return i; 95 - } 96 - 97 - nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f); 58 + if (!firmware_request_nowarn(fw, f, device->dev)) { 59 + nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n", 60 + f, (*fw)->size); 61 + return 0; 98 62 } 99 63 100 - nvkm_error(subdev, "failed to load firmware \"%s\"", fwname); 64 + nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f); 101 65 return -ENOENT; 102 - } 103 - 104 - int 105 - nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, 106 - const struct firmware **fw) 107 - { 108 - return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw); 109 66 } 110 67 111 68 /**
+1 -1
drivers/gpu/drm/nouveau/nvkm/core/subdev.c
··· 30 30 31 31 const char * 32 32 nvkm_subdev_name[NVKM_SUBDEV_NR] = { 33 + [NVKM_SUBDEV_ACR ] = "acr", 33 34 [NVKM_SUBDEV_BAR ] = "bar", 34 35 [NVKM_SUBDEV_VBIOS ] = "bios", 35 36 [NVKM_SUBDEV_BUS ] = "bus", ··· 51 50 [NVKM_SUBDEV_MXM ] = "mxm", 52 51 [NVKM_SUBDEV_PCI ] = "pci", 53 52 [NVKM_SUBDEV_PMU ] = "pmu", 54 - [NVKM_SUBDEV_SECBOOT ] = "secboot", 55 53 [NVKM_SUBDEV_THERM ] = "therm", 56 54 [NVKM_SUBDEV_TIMER ] = "tmr", 57 55 [NVKM_SUBDEV_TOP ] = "top",
+73 -35
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
··· 1987 1987 .dma = gf119_dma_new, 1988 1988 .fifo = gm107_fifo_new, 1989 1989 .gr = gm107_gr_new, 1990 + .nvdec[0] = gm107_nvdec_new, 1991 + .nvenc[0] = gm107_nvenc_new, 1990 1992 .sw = gf100_sw_new, 1991 1993 }; 1992 1994 ··· 2029 2027 static const struct nvkm_device_chip 2030 2028 nv120_chipset = { 2031 2029 .name = "GM200", 2030 + .acr = gm200_acr_new, 2032 2031 .bar = gm107_bar_new, 2033 2032 .bios = nvkm_bios_new, 2034 2033 .bus = gf100_bus_new, ··· 2048 2045 .pci = gk104_pci_new, 2049 2046 .pmu = gm107_pmu_new, 2050 2047 .therm = gm200_therm_new, 2051 - .secboot = gm200_secboot_new, 2052 2048 .timer = gk20a_timer_new, 2053 2049 .top = gk104_top_new, 2054 2050 .volt = gk104_volt_new, ··· 2058 2056 .dma = gf119_dma_new, 2059 2057 .fifo = gm200_fifo_new, 2060 2058 .gr = gm200_gr_new, 2059 + .nvdec[0] = gm107_nvdec_new, 2060 + .nvenc[0] = gm107_nvenc_new, 2061 + .nvenc[1] = gm107_nvenc_new, 2061 2062 .sw = gf100_sw_new, 2062 2063 }; 2063 2064 2064 2065 static const struct nvkm_device_chip 2065 2066 nv124_chipset = { 2066 2067 .name = "GM204", 2068 + .acr = gm200_acr_new, 2067 2069 .bar = gm107_bar_new, 2068 2070 .bios = nvkm_bios_new, 2069 2071 .bus = gf100_bus_new, ··· 2086 2080 .pci = gk104_pci_new, 2087 2081 .pmu = gm107_pmu_new, 2088 2082 .therm = gm200_therm_new, 2089 - .secboot = gm200_secboot_new, 2090 2083 .timer = gk20a_timer_new, 2091 2084 .top = gk104_top_new, 2092 2085 .volt = gk104_volt_new, ··· 2096 2091 .dma = gf119_dma_new, 2097 2092 .fifo = gm200_fifo_new, 2098 2093 .gr = gm200_gr_new, 2094 + .nvdec[0] = gm107_nvdec_new, 2095 + .nvenc[0] = gm107_nvenc_new, 2096 + .nvenc[1] = gm107_nvenc_new, 2099 2097 .sw = gf100_sw_new, 2100 2098 }; 2101 2099 2102 2100 static const struct nvkm_device_chip 2103 2101 nv126_chipset = { 2104 2102 .name = "GM206", 2103 + .acr = gm200_acr_new, 2105 2104 .bar = gm107_bar_new, 2106 2105 .bios = nvkm_bios_new, 2107 2106 .bus = gf100_bus_new, ··· 2124 2115 .pci = gk104_pci_new, 2125 2116 .pmu = gm107_pmu_new, 2126 2117 
.therm = gm200_therm_new, 2127 - .secboot = gm200_secboot_new, 2128 2118 .timer = gk20a_timer_new, 2129 2119 .top = gk104_top_new, 2130 2120 .volt = gk104_volt_new, ··· 2134 2126 .dma = gf119_dma_new, 2135 2127 .fifo = gm200_fifo_new, 2136 2128 .gr = gm200_gr_new, 2129 + .nvdec[0] = gm107_nvdec_new, 2130 + .nvenc[0] = gm107_nvenc_new, 2137 2131 .sw = gf100_sw_new, 2138 2132 }; 2139 2133 2140 2134 static const struct nvkm_device_chip 2141 2135 nv12b_chipset = { 2142 2136 .name = "GM20B", 2137 + .acr = gm20b_acr_new, 2143 2138 .bar = gm20b_bar_new, 2144 2139 .bus = gf100_bus_new, 2145 2140 .clk = gm20b_clk_new, ··· 2154 2143 .mc = gk20a_mc_new, 2155 2144 .mmu = gm20b_mmu_new, 2156 2145 .pmu = gm20b_pmu_new, 2157 - .secboot = gm20b_secboot_new, 2158 2146 .timer = gk20a_timer_new, 2159 2147 .top = gk104_top_new, 2160 2148 .ce[2] = gm200_ce_new, ··· 2167 2157 static const struct nvkm_device_chip 2168 2158 nv130_chipset = { 2169 2159 .name = "GP100", 2160 + .acr = gm200_acr_new, 2170 2161 .bar = gm107_bar_new, 2171 2162 .bios = nvkm_bios_new, 2172 2163 .bus = gf100_bus_new, ··· 2183 2172 .mc = gp100_mc_new, 2184 2173 .mmu = gp100_mmu_new, 2185 2174 .therm = gp100_therm_new, 2186 - .secboot = gm200_secboot_new, 2187 2175 .pci = gp100_pci_new, 2188 2176 .pmu = gp100_pmu_new, 2189 2177 .timer = gk20a_timer_new, ··· 2197 2187 .disp = gp100_disp_new, 2198 2188 .fifo = gp100_fifo_new, 2199 2189 .gr = gp100_gr_new, 2190 + .nvdec[0] = gm107_nvdec_new, 2191 + .nvenc[0] = gm107_nvenc_new, 2192 + .nvenc[1] = gm107_nvenc_new, 2193 + .nvenc[2] = gm107_nvenc_new, 2200 2194 .sw = gf100_sw_new, 2201 2195 }; 2202 2196 2203 2197 static const struct nvkm_device_chip 2204 2198 nv132_chipset = { 2205 2199 .name = "GP102", 2200 + .acr = gp102_acr_new, 2206 2201 .bar = gm107_bar_new, 2207 2202 .bios = nvkm_bios_new, 2208 2203 .bus = gf100_bus_new, ··· 2223 2208 .mc = gp100_mc_new, 2224 2209 .mmu = gp100_mmu_new, 2225 2210 .therm = gp100_therm_new, 2226 - .secboot = gp102_secboot_new, 2227 2211 
.pci = gp100_pci_new, 2228 2212 .pmu = gp102_pmu_new, 2229 2213 .timer = gk20a_timer_new, ··· 2235 2221 .dma = gf119_dma_new, 2236 2222 .fifo = gp100_fifo_new, 2237 2223 .gr = gp102_gr_new, 2238 - .nvdec[0] = gp102_nvdec_new, 2224 + .nvdec[0] = gm107_nvdec_new, 2225 + .nvenc[0] = gm107_nvenc_new, 2226 + .nvenc[1] = gm107_nvenc_new, 2239 2227 .sec2 = gp102_sec2_new, 2240 2228 .sw = gf100_sw_new, 2241 2229 }; ··· 2245 2229 static const struct nvkm_device_chip 2246 2230 nv134_chipset = { 2247 2231 .name = "GP104", 2232 + .acr = gp102_acr_new, 2248 2233 .bar = gm107_bar_new, 2249 2234 .bios = nvkm_bios_new, 2250 2235 .bus = gf100_bus_new, ··· 2261 2244 .mc = gp100_mc_new, 2262 2245 .mmu = gp100_mmu_new, 2263 2246 .therm = gp100_therm_new, 2264 - .secboot = gp102_secboot_new, 2265 2247 .pci = gp100_pci_new, 2266 2248 .pmu = gp102_pmu_new, 2267 2249 .timer = gk20a_timer_new, ··· 2273 2257 .dma = gf119_dma_new, 2274 2258 .fifo = gp100_fifo_new, 2275 2259 .gr = gp104_gr_new, 2276 - .nvdec[0] = gp102_nvdec_new, 2260 + .nvdec[0] = gm107_nvdec_new, 2261 + .nvenc[0] = gm107_nvenc_new, 2262 + .nvenc[1] = gm107_nvenc_new, 2277 2263 .sec2 = gp102_sec2_new, 2278 2264 .sw = gf100_sw_new, 2279 2265 }; ··· 2283 2265 static const struct nvkm_device_chip 2284 2266 nv136_chipset = { 2285 2267 .name = "GP106", 2268 + .acr = gp102_acr_new, 2286 2269 .bar = gm107_bar_new, 2287 2270 .bios = nvkm_bios_new, 2288 2271 .bus = gf100_bus_new, ··· 2299 2280 .mc = gp100_mc_new, 2300 2281 .mmu = gp100_mmu_new, 2301 2282 .therm = gp100_therm_new, 2302 - .secboot = gp102_secboot_new, 2303 2283 .pci = gp100_pci_new, 2304 2284 .pmu = gp102_pmu_new, 2305 2285 .timer = gk20a_timer_new, ··· 2311 2293 .dma = gf119_dma_new, 2312 2294 .fifo = gp100_fifo_new, 2313 2295 .gr = gp104_gr_new, 2314 - .nvdec[0] = gp102_nvdec_new, 2296 + .nvdec[0] = gm107_nvdec_new, 2297 + .nvenc[0] = gm107_nvenc_new, 2315 2298 .sec2 = gp102_sec2_new, 2316 2299 .sw = gf100_sw_new, 2317 2300 }; ··· 2320 2301 static const struct 
nvkm_device_chip 2321 2302 nv137_chipset = { 2322 2303 .name = "GP107", 2304 + .acr = gp102_acr_new, 2323 2305 .bar = gm107_bar_new, 2324 2306 .bios = nvkm_bios_new, 2325 2307 .bus = gf100_bus_new, ··· 2336 2316 .mc = gp100_mc_new, 2337 2317 .mmu = gp100_mmu_new, 2338 2318 .therm = gp100_therm_new, 2339 - .secboot = gp102_secboot_new, 2340 2319 .pci = gp100_pci_new, 2341 2320 .pmu = gp102_pmu_new, 2342 2321 .timer = gk20a_timer_new, ··· 2348 2329 .dma = gf119_dma_new, 2349 2330 .fifo = gp100_fifo_new, 2350 2331 .gr = gp107_gr_new, 2351 - .nvdec[0] = gp102_nvdec_new, 2332 + .nvdec[0] = gm107_nvdec_new, 2333 + .nvenc[0] = gm107_nvenc_new, 2334 + .nvenc[1] = gm107_nvenc_new, 2352 2335 .sec2 = gp102_sec2_new, 2353 2336 .sw = gf100_sw_new, 2354 2337 }; ··· 2358 2337 static const struct nvkm_device_chip 2359 2338 nv138_chipset = { 2360 2339 .name = "GP108", 2340 + .acr = gp108_acr_new, 2361 2341 .bar = gm107_bar_new, 2362 2342 .bios = nvkm_bios_new, 2363 2343 .bus = gf100_bus_new, ··· 2374 2352 .mc = gp100_mc_new, 2375 2353 .mmu = gp100_mmu_new, 2376 2354 .therm = gp100_therm_new, 2377 - .secboot = gp108_secboot_new, 2378 2355 .pci = gp100_pci_new, 2379 2356 .pmu = gp102_pmu_new, 2380 2357 .timer = gk20a_timer_new, ··· 2385 2364 .disp = gp102_disp_new, 2386 2365 .dma = gf119_dma_new, 2387 2366 .fifo = gp100_fifo_new, 2388 - .gr = gp107_gr_new, 2389 - .nvdec[0] = gp102_nvdec_new, 2390 - .sec2 = gp102_sec2_new, 2367 + .gr = gp108_gr_new, 2368 + .nvdec[0] = gm107_nvdec_new, 2369 + .sec2 = gp108_sec2_new, 2391 2370 .sw = gf100_sw_new, 2392 2371 }; 2393 2372 2394 2373 static const struct nvkm_device_chip 2395 2374 nv13b_chipset = { 2396 2375 .name = "GP10B", 2376 + .acr = gp10b_acr_new, 2397 2377 .bar = gm20b_bar_new, 2398 2378 .bus = gf100_bus_new, 2399 - .fault = gp100_fault_new, 2379 + .fault = gp10b_fault_new, 2400 2380 .fb = gp10b_fb_new, 2401 2381 .fuse = gm107_fuse_new, 2402 2382 .ibus = gp10b_ibus_new, 2403 2383 .imem = gk20a_instmem_new, 2404 - .ltc = gp102_ltc_new, 
2384 + .ltc = gp10b_ltc_new, 2405 2385 .mc = gp10b_mc_new, 2406 2386 .mmu = gp10b_mmu_new, 2407 - .secboot = gp10b_secboot_new, 2408 - .pmu = gm20b_pmu_new, 2387 + .pmu = gp10b_pmu_new, 2409 2388 .timer = gk20a_timer_new, 2410 2389 .top = gk104_top_new, 2411 - .ce[2] = gp102_ce_new, 2390 + .ce[0] = gp100_ce_new, 2412 2391 .dma = gf119_dma_new, 2413 2392 .fifo = gp10b_fifo_new, 2414 2393 .gr = gp10b_gr_new, ··· 2418 2397 static const struct nvkm_device_chip 2419 2398 nv140_chipset = { 2420 2399 .name = "GV100", 2400 + .acr = gp108_acr_new, 2421 2401 .bar = gm107_bar_new, 2422 2402 .bios = nvkm_bios_new, 2423 2403 .bus = gf100_bus_new, ··· 2436 2414 .mmu = gv100_mmu_new, 2437 2415 .pci = gp100_pci_new, 2438 2416 .pmu = gp102_pmu_new, 2439 - .secboot = gp108_secboot_new, 2440 2417 .therm = gp100_therm_new, 2441 2418 .timer = gk20a_timer_new, 2442 2419 .top = gk104_top_new, ··· 2452 2431 .dma = gv100_dma_new, 2453 2432 .fifo = gv100_fifo_new, 2454 2433 .gr = gv100_gr_new, 2455 - .nvdec[0] = gp102_nvdec_new, 2456 - .sec2 = gp102_sec2_new, 2434 + .nvdec[0] = gm107_nvdec_new, 2435 + .nvenc[0] = gm107_nvenc_new, 2436 + .nvenc[1] = gm107_nvenc_new, 2437 + .nvenc[2] = gm107_nvenc_new, 2438 + .sec2 = gp108_sec2_new, 2457 2439 }; 2458 2440 2459 2441 static const struct nvkm_device_chip 2460 2442 nv162_chipset = { 2461 2443 .name = "TU102", 2444 + .acr = tu102_acr_new, 2462 2445 .bar = tu102_bar_new, 2463 2446 .bios = nvkm_bios_new, 2464 2447 .bus = gf100_bus_new, ··· 2491 2466 .disp = tu102_disp_new, 2492 2467 .dma = gv100_dma_new, 2493 2468 .fifo = tu102_fifo_new, 2494 - .nvdec[0] = gp102_nvdec_new, 2469 + .gr = tu102_gr_new, 2470 + .nvdec[0] = gm107_nvdec_new, 2471 + .nvenc[0] = gm107_nvenc_new, 2495 2472 .sec2 = tu102_sec2_new, 2496 2473 }; 2497 2474 2498 2475 static const struct nvkm_device_chip 2499 2476 nv164_chipset = { 2500 2477 .name = "TU104", 2478 + .acr = tu102_acr_new, 2501 2479 .bar = tu102_bar_new, 2502 2480 .bios = nvkm_bios_new, 2503 2481 .bus = gf100_bus_new, 
··· 2529 2501 .disp = tu102_disp_new, 2530 2502 .dma = gv100_dma_new, 2531 2503 .fifo = tu102_fifo_new, 2532 - .nvdec[0] = gp102_nvdec_new, 2504 + .gr = tu102_gr_new, 2505 + .nvdec[0] = gm107_nvdec_new, 2506 + .nvdec[1] = gm107_nvdec_new, 2507 + .nvenc[0] = gm107_nvenc_new, 2533 2508 .sec2 = tu102_sec2_new, 2534 2509 }; 2535 2510 2536 2511 static const struct nvkm_device_chip 2537 2512 nv166_chipset = { 2538 2513 .name = "TU106", 2514 + .acr = tu102_acr_new, 2539 2515 .bar = tu102_bar_new, 2540 2516 .bios = nvkm_bios_new, 2541 2517 .bus = gf100_bus_new, ··· 2568 2536 .disp = tu102_disp_new, 2569 2537 .dma = gv100_dma_new, 2570 2538 .fifo = tu102_fifo_new, 2571 - .nvdec[0] = gp102_nvdec_new, 2539 + .gr = tu102_gr_new, 2540 + .nvdec[0] = gm107_nvdec_new, 2541 + .nvdec[1] = gm107_nvdec_new, 2542 + .nvdec[2] = gm107_nvdec_new, 2543 + .nvenc[0] = gm107_nvenc_new, 2572 2544 .sec2 = tu102_sec2_new, 2573 2545 }; 2574 2546 ··· 2607 2571 .disp = tu102_disp_new, 2608 2572 .dma = gv100_dma_new, 2609 2573 .fifo = tu102_fifo_new, 2610 - .nvdec[0] = gp102_nvdec_new, 2574 + .nvdec[0] = gm107_nvdec_new, 2575 + .nvenc[0] = gm107_nvenc_new, 2611 2576 .sec2 = tu102_sec2_new, 2612 2577 }; 2613 2578 ··· 2643 2606 .disp = tu102_disp_new, 2644 2607 .dma = gv100_dma_new, 2645 2608 .fifo = tu102_fifo_new, 2646 - .nvdec[0] = gp102_nvdec_new, 2609 + .nvdec[0] = gm107_nvdec_new, 2610 + .nvenc[0] = gm107_nvenc_new, 2647 2611 .sec2 = tu102_sec2_new, 2648 2612 }; 2649 2613 ··· 2676 2638 2677 2639 switch (index) { 2678 2640 #define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break 2641 + _(ACR , device->acr , &device->acr->subdev); 2679 2642 _(BAR , device->bar , &device->bar->subdev); 2680 2643 _(VBIOS , device->bios , &device->bios->subdev); 2681 2644 _(BUS , device->bus , &device->bus->subdev); ··· 2697 2658 _(MXM , device->mxm , device->mxm); 2698 2659 _(PCI , device->pci , &device->pci->subdev); 2699 2660 _(PMU , device->pmu , &device->pmu->subdev); 2700 - _(SECBOOT , device->secboot , 
&device->secboot->subdev); 2701 2661 _(THERM , device->therm , &device->therm->subdev); 2702 2662 _(TIMER , device->timer , &device->timer->subdev); 2703 2663 _(TOP , device->top , &device->top->subdev); ··· 2741 2703 _(MSPDEC , device->mspdec , device->mspdec); 2742 2704 _(MSPPP , device->msppp , device->msppp); 2743 2705 _(MSVLD , device->msvld , device->msvld); 2744 - _(NVENC0 , device->nvenc[0], device->nvenc[0]); 2745 - _(NVENC1 , device->nvenc[1], device->nvenc[1]); 2746 - _(NVENC2 , device->nvenc[2], device->nvenc[2]); 2706 + _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine); 2707 + _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine); 2708 + _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine); 2747 2709 _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine); 2748 2710 _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine); 2749 2711 _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine); ··· 3182 3144 } \ 3183 3145 break 3184 3146 switch (i) { 3147 + _(NVKM_SUBDEV_ACR , acr); 3185 3148 _(NVKM_SUBDEV_BAR , bar); 3186 3149 _(NVKM_SUBDEV_VBIOS , bios); 3187 3150 _(NVKM_SUBDEV_BUS , bus); ··· 3203 3164 _(NVKM_SUBDEV_MXM , mxm); 3204 3165 _(NVKM_SUBDEV_PCI , pci); 3205 3166 _(NVKM_SUBDEV_PMU , pmu); 3206 - _(NVKM_SUBDEV_SECBOOT , secboot); 3207 3167 _(NVKM_SUBDEV_THERM , therm); 3208 3168 _(NVKM_SUBDEV_TIMER , timer); 3209 3169 _(NVKM_SUBDEV_TOP , top);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
··· 3 3 #define __NVKM_DEVICE_PRIV_H__ 4 4 #include <core/device.h> 5 5 6 + #include <subdev/acr.h> 6 7 #include <subdev/bar.h> 7 8 #include <subdev/bios.h> 8 9 #include <subdev/bus.h> ··· 28 27 #include <subdev/timer.h> 29 28 #include <subdev/top.h> 30 29 #include <subdev/volt.h> 31 - #include <subdev/secboot.h> 32 30 33 31 #include <engine/bsp.h> 34 32 #include <engine/ce.h>
+18 -6
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
··· 52 52 clk_set_rate(tdev->clk_pwr, 204000000); 53 53 udelay(10); 54 54 55 - reset_control_assert(tdev->rst); 56 - udelay(10); 57 - 58 55 if (!tdev->pdev->dev.pm_domain) { 56 + reset_control_assert(tdev->rst); 57 + udelay(10); 58 + 59 59 ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D); 60 60 if (ret) 61 61 goto err_clamp; 62 62 udelay(10); 63 - } 64 63 65 - reset_control_deassert(tdev->rst); 66 - udelay(10); 64 + reset_control_deassert(tdev->rst); 65 + udelay(10); 66 + } 67 67 68 68 return 0; 69 69 ··· 279 279 struct nvkm_device **pdevice) 280 280 { 281 281 struct nvkm_device_tegra *tdev; 282 + unsigned long rate; 282 283 int ret; 283 284 284 285 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) ··· 306 305 if (IS_ERR(tdev->clk)) { 307 306 ret = PTR_ERR(tdev->clk); 308 307 goto free; 308 + } 309 + 310 + rate = clk_get_rate(tdev->clk); 311 + if (rate == 0) { 312 + ret = clk_set_rate(tdev->clk, ULONG_MAX); 313 + if (ret < 0) 314 + goto free; 315 + 316 + rate = clk_get_rate(tdev->clk); 317 + 318 + dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate); 309 319 } 310 320 311 321 if (func->require_ref_clk)
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
··· 365 365 * and it's better to have a failed modeset than that. 366 366 */ 367 367 for (cfg = nvkm_dp_rates; cfg->rate; cfg++) { 368 - if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) { 368 + if (cfg->nr <= outp_nr && cfg->bw <= outp_bw) { 369 369 /* Try to respect sink limits too when selecting 370 370 * lowest link configuration. 371 371 */
+3
drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
··· 36 36 nvkm-y += nvkm/engine/gr/gp102.o 37 37 nvkm-y += nvkm/engine/gr/gp104.o 38 38 nvkm-y += nvkm/engine/gr/gp107.o 39 + nvkm-y += nvkm/engine/gr/gp108.o 39 40 nvkm-y += nvkm/engine/gr/gp10b.o 40 41 nvkm-y += nvkm/engine/gr/gv100.o 42 + nvkm-y += nvkm/engine/gr/tu102.o 41 43 42 44 nvkm-y += nvkm/engine/gr/ctxnv40.o 43 45 nvkm-y += nvkm/engine/gr/ctxnv50.o ··· 62 60 nvkm-y += nvkm/engine/gr/ctxgp104.o 63 61 nvkm-y += nvkm/engine/gr/ctxgp107.o 64 62 nvkm-y += nvkm/engine/gr/ctxgv100.o 63 + nvkm-y += nvkm/engine/gr/ctxtu102.o
+12 -15
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
··· 1324 1324 void 1325 1325 gf100_grctx_generate_floorsweep(struct gf100_gr *gr) 1326 1326 { 1327 - struct nvkm_device *device = gr->base.engine.subdev.device; 1328 1327 const struct gf100_grctx_func *func = gr->func->grctx; 1329 - int gpc, sm, i, j; 1330 - u32 data; 1328 + int sm; 1331 1329 1332 1330 for (sm = 0; sm < gr->sm_nr; sm++) { 1333 1331 func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm); ··· 1333 1335 func->tpc_nr(gr, gr->sm[sm].gpc); 1334 1336 } 1335 1337 1336 - for (gpc = 0, i = 0; i < 4; i++) { 1337 - for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++) 1338 - data |= gr->tpc_nr[gpc] << (j * 4); 1339 - nvkm_wr32(device, 0x406028 + (i * 4), data); 1340 - nvkm_wr32(device, 0x405870 + (i * 4), data); 1341 - } 1338 + gf100_gr_init_num_tpc_per_gpc(gr, false, true); 1339 + if (!func->skip_pd_num_tpc_per_gpc) 1340 + gf100_gr_init_num_tpc_per_gpc(gr, true, false); 1342 1341 1343 1342 if (func->r4060a8) 1344 1343 func->r4060a8(gr); ··· 1369 1374 1370 1375 nvkm_mc_unk260(device, 0); 1371 1376 1372 - if (!gr->fuc_sw_ctx) { 1377 + if (!gr->sw_ctx) { 1373 1378 gf100_gr_mmio(gr, grctx->hub); 1374 1379 gf100_gr_mmio(gr, grctx->gpc_0); 1375 1380 gf100_gr_mmio(gr, grctx->zcull); ··· 1377 1382 gf100_gr_mmio(gr, grctx->tpc); 1378 1383 gf100_gr_mmio(gr, grctx->ppc); 1379 1384 } else { 1380 - gf100_gr_mmio(gr, gr->fuc_sw_ctx); 1385 + gf100_gr_mmio(gr, gr->sw_ctx); 1381 1386 } 1382 1387 1383 1388 gf100_gr_wait_idle(gr); ··· 1396 1401 gf100_gr_wait_idle(gr); 1397 1402 1398 1403 if (grctx->r400088) grctx->r400088(gr, false); 1399 - if (gr->fuc_bundle) 1400 - gf100_gr_icmd(gr, gr->fuc_bundle); 1404 + if (gr->bundle) 1405 + gf100_gr_icmd(gr, gr->bundle); 1401 1406 else 1402 1407 gf100_gr_icmd(gr, grctx->icmd); 1403 1408 if (grctx->sw_veid_bundle_init) ··· 1406 1411 1407 1412 nvkm_wr32(device, 0x404154, idle_timeout); 1408 1413 1409 - if (gr->fuc_method) 1410 - gf100_gr_mthd(gr, gr->fuc_method); 1414 + if (gr->method) 1415 + gf100_gr_mthd(gr, gr->method); 1411 1416 
else 1412 1417 gf100_gr_mthd(gr, grctx->mthd); 1413 1418 nvkm_mc_unk260(device, 1); ··· 1426 1431 grctx->r419a3c(gr); 1427 1432 if (grctx->r408840) 1428 1433 grctx->r408840(gr); 1434 + if (grctx->r419c0c) 1435 + grctx->r419c0c(gr); 1429 1436 } 1430 1437 1431 1438 #define CB_RESERVED 0x80000
+10
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
··· 57 57 /* floorsweeping */ 58 58 void (*sm_id)(struct gf100_gr *, int gpc, int tpc, int sm); 59 59 void (*tpc_nr)(struct gf100_gr *, int gpc); 60 + bool skip_pd_num_tpc_per_gpc; 60 61 void (*r4060a8)(struct gf100_gr *); 61 62 void (*rop_mapping)(struct gf100_gr *); 62 63 void (*alpha_beta_tables)(struct gf100_gr *); ··· 77 76 void (*r418e94)(struct gf100_gr *); 78 77 void (*r419a3c)(struct gf100_gr *); 79 78 void (*r408840)(struct gf100_gr *); 79 + void (*r419c0c)(struct gf100_gr *); 80 80 }; 81 81 82 82 extern const struct gf100_grctx_func gf100_grctx; ··· 154 152 extern const struct gf100_grctx_func gp107_grctx; 155 153 156 154 extern const struct gf100_grctx_func gv100_grctx; 155 + 156 + extern const struct gf100_grctx_func tu102_grctx; 157 + void gv100_grctx_unkn88c(struct gf100_gr *, bool); 158 + void gv100_grctx_generate_unkn(struct gf100_gr *); 159 + extern const struct gf100_gr_init gv100_grctx_init_sw_veid_bundle_init_0[]; 160 + void gv100_grctx_generate_attrib(struct gf100_grctx *); 161 + void gv100_grctx_generate_rop_mapping(struct gf100_gr *); 162 + void gv100_grctx_generate_r400088(struct gf100_gr *, bool); 157 163 158 164 /* context init value lists */ 159 165
+3 -3
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
··· 32 32 u32 idle_timeout; 33 33 int i; 34 34 35 - gf100_gr_mmio(gr, gr->fuc_sw_ctx); 35 + gf100_gr_mmio(gr, gr->sw_ctx); 36 36 37 37 gf100_gr_wait_idle(gr); 38 38 ··· 56 56 nvkm_wr32(device, 0x404154, idle_timeout); 57 57 gf100_gr_wait_idle(gr); 58 58 59 - gf100_gr_mthd(gr, gr->fuc_method); 59 + gf100_gr_mthd(gr, gr->method); 60 60 gf100_gr_wait_idle(gr); 61 61 62 - gf100_gr_icmd(gr, gr->fuc_bundle); 62 + gf100_gr_icmd(gr, gr->bundle); 63 63 grctx->pagepool(info); 64 64 grctx->bundle(info); 65 65 }
+3 -3
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
··· 29 29 u32 idle_timeout; 30 30 int i, tmp; 31 31 32 - gf100_gr_mmio(gr, gr->fuc_sw_ctx); 32 + gf100_gr_mmio(gr, gr->sw_ctx); 33 33 34 34 gf100_gr_wait_idle(gr); 35 35 ··· 59 59 nvkm_wr32(device, 0x404154, idle_timeout); 60 60 gf100_gr_wait_idle(gr); 61 61 62 - gf100_gr_mthd(gr, gr->fuc_method); 62 + gf100_gr_mthd(gr, gr->method); 63 63 gf100_gr_wait_idle(gr); 64 64 65 - gf100_gr_icmd(gr, gr->fuc_bundle); 65 + gf100_gr_icmd(gr, gr->bundle); 66 66 grctx->pagepool(info); 67 67 grctx->bundle(info); 68 68 }
+11 -12
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c
··· 25 25 * PGRAPH context implementation 26 26 ******************************************************************************/ 27 27 28 - static const struct gf100_gr_init 28 + const struct gf100_gr_init 29 29 gv100_grctx_init_sw_veid_bundle_init_0[] = { 30 30 { 0x00001000, 64, 0x00100000, 0x00000008 }, 31 31 { 0x00000941, 64, 0x00100000, 0x00000000 }, ··· 58 58 {} 59 59 }; 60 60 61 - static void 61 + void 62 62 gv100_grctx_generate_attrib(struct gf100_grctx *info) 63 63 { 64 64 struct gf100_gr *gr = info->gr; ··· 67 67 const u32 attrib = grctx->attrib_nr; 68 68 const u32 gfxp = grctx->gfxp_nr; 69 69 const int s = 12; 70 - const int max_batches = 0xffff; 71 70 u32 size = grctx->alpha_nr_max * gr->tpc_total; 72 71 u32 ao = 0; 73 72 u32 bo = ao + size; 74 73 int gpc, ppc, b, n = 0; 75 74 76 - size += grctx->gfxp_nr * gr->tpc_total; 77 - size = ((size * 0x20) + 128) & ~127; 75 + for (gpc = 0; gpc < gr->gpc_nr; gpc++) 76 + size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max; 77 + size = ((size * 0x20) + 127) & ~127; 78 78 b = mmio_vram(info, size, (1 << s), false); 79 79 80 80 mmio_refn(info, 0x418810, 0x80000000, s, b); ··· 84 84 mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7); 85 85 mmio_wr32(info, 0x405830, attrib); 86 86 mmio_wr32(info, 0x40585c, alpha); 87 - mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); 88 87 89 88 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 90 89 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { 91 90 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc]; 92 - const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc]; 93 - const u32 gs = gfxp * gr->ppc_tpc_nr[gpc][ppc]; 91 + const u32 bs = attrib * gr->ppc_tpc_max; 92 + const u32 gs = gfxp * gr->ppc_tpc_max; 94 93 const u32 u = 0x418ea0 + (n * 0x04); 95 94 const u32 o = PPC_UNIT(gpc, ppc, 0); 96 95 if (!(gr->ppc_mask[gpc] & (1 << ppc))) ··· 109 110 mmio_wr32(info, 0x41befc, 0x00000100); 110 111 } 111 112 112 - static void 113 + void 113 114 gv100_grctx_generate_rop_mapping(struct 
gf100_gr *gr) 114 115 { 115 116 struct nvkm_device *device = gr->base.engine.subdev.device; ··· 146 147 gr->screen_tile_row_offset); 147 148 } 148 149 149 - static void 150 + void 150 151 gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on) 151 152 { 152 153 struct nvkm_device *device = gr->base.engine.subdev.device; ··· 162 163 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm); 163 164 } 164 165 165 - static void 166 + void 166 167 gv100_grctx_generate_unkn(struct gf100_gr *gr) 167 168 { 168 169 struct nvkm_device *device = gr->base.engine.subdev.device; ··· 173 174 nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008); 174 175 } 175 176 176 - static void 177 + void 177 178 gv100_grctx_unkn88c(struct gf100_gr *gr, bool on) 178 179 { 179 180 struct nvkm_device *device = gr->base.engine.subdev.device;
+95
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxtu102.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "ctxgf100.h" 23 + 24 + static void 25 + tu102_grctx_generate_r419c0c(struct gf100_gr *gr) 26 + { 27 + struct nvkm_device *device = gr->base.engine.subdev.device; 28 + nvkm_mask(device, 0x419c0c, 0x80000000, 0x80000000); 29 + nvkm_mask(device, 0x40584c, 0x00000008, 0x00000000); 30 + nvkm_mask(device, 0x400080, 0x00000000, 0x00000000); 31 + } 32 + 33 + static void 34 + tu102_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm) 35 + { 36 + struct nvkm_device *device = gr->base.engine.subdev.device; 37 + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm); 38 + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm); 39 + } 40 + 41 + static const struct gf100_gr_init 42 + tu102_grctx_init_unknown_bundle_init_0[] = { 43 + { 0x00001000, 1, 0x00000001, 0x00000004 }, 44 + { 0x00002020, 64, 0x00000001, 0x00000000 }, 45 + { 0x0001e100, 1, 0x00000001, 0x00000001 }, 46 + {} 47 + }; 48 + 49 + static const struct gf100_gr_pack 50 + tu102_grctx_pack_sw_veid_bundle_init[] = { 51 + { gv100_grctx_init_sw_veid_bundle_init_0 }, 52 + { tu102_grctx_init_unknown_bundle_init_0 }, 53 + {} 54 + }; 55 + 56 + static void 57 + tu102_grctx_generate_attrib(struct gf100_grctx *info) 58 + { 59 + const u64 size = 0x80000; /*XXX: educated guess */ 60 + const int s = 8; 61 + const int b = mmio_vram(info, size, (1 << s), true); 62 + 63 + gv100_grctx_generate_attrib(info); 64 + 65 + mmio_refn(info, 0x408070, 0x00000000, s, b); 66 + mmio_wr32(info, 0x408074, size >> s); /*XXX: guess */ 67 + mmio_refn(info, 0x419034, 0x00000000, s, b); 68 + mmio_wr32(info, 0x408078, 0x00000000); 69 + } 70 + 71 + const struct gf100_grctx_func 72 + tu102_grctx = { 73 + .unkn88c = gv100_grctx_unkn88c, 74 + .main = gf100_grctx_generate_main, 75 + .unkn = gv100_grctx_generate_unkn, 76 + .sw_veid_bundle_init = tu102_grctx_pack_sw_veid_bundle_init, 77 + .bundle = gm107_grctx_generate_bundle, 78 + .bundle_size = 0x3000, 79 + .bundle_min_gpm_fifo_depth = 0x180, 80 + .bundle_token_limit = 0xa80, 81 + 
.pagepool = gp100_grctx_generate_pagepool, 82 + .pagepool_size = 0x20000, 83 + .attrib = tu102_grctx_generate_attrib, 84 + .attrib_nr_max = 0x800, 85 + .attrib_nr = 0x700, 86 + .alpha_nr_max = 0xc00, 87 + .alpha_nr = 0x800, 88 + .gfxp_nr = 0xfa8, 89 + .sm_id = tu102_grctx_generate_sm_id, 90 + .skip_pd_num_tpc_per_gpc = true, 91 + .rop_mapping = gv100_grctx_generate_rop_mapping, 92 + .r406500 = gm200_grctx_generate_r406500, 93 + .r400088 = gv100_grctx_generate_r400088, 94 + .r419c0c = tu102_grctx_generate_r419c0c, 95 + };
+404 -404
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
··· 441 441 0x020014fe, 442 442 0x12004002, 443 443 0xbd0002f6, 444 - 0x05c94104, 444 + 0x05ca4104, 445 445 0xbd0010fe, 446 446 0x07004024, 447 447 0xbd0002f6, ··· 460 460 0x01039204, 461 461 0x03090080, 462 462 0xbd0003f6, 463 - 0x87044204, 464 - 0xf6040040, 465 - 0x04bd0002, 466 - 0x00400402, 467 - 0x0002f603, 468 - 0x31f404bd, 469 - 0x96048e10, 470 - 0x00657e40, 471 - 0xc7feb200, 472 - 0x01b590f1, 473 - 0x1ff4f003, 474 - 0x01020fb5, 475 - 0x041fbb01, 476 - 0x800112b6, 477 - 0xf6010300, 478 - 0x04bd0001, 479 - 0x01040080, 480 - 0xbd0001f6, 481 - 0x01004104, 482 - 0xac7e020f, 483 - 0xbb7e0006, 484 - 0x100f0006, 485 - 0x0006fd7e, 486 - 0x98000e98, 487 - 0x207e010f, 488 - 0x14950001, 489 - 0xc0008008, 490 - 0x0004f601, 491 - 0x008004bd, 492 - 0x04f601c1, 493 - 0xb704bd00, 494 - 0xbb130030, 495 - 0xf5b6001f, 496 - 0xd3008002, 497 - 0x000ff601, 498 - 0x15b604bd, 499 - 0x0110b608, 500 - 0xb20814b6, 501 - 0x02687e1f, 502 - 0x001fbb00, 503 - 0x84020398, 504 - /* 0x041f: init_gpc */ 505 - 0xb8502000, 506 - 0x0008044e, 507 - 0x8f7e1fb2, 508 - 0x4eb80000, 509 - 0xbd00010c, 510 - 0x008f7ef4, 511 - 0x044eb800, 512 - 0x8f7e0001, 513 - 0x4eb80000, 514 - 0x0f000100, 515 - 0x008f7e02, 516 - 0x004eb800, 517 - /* 0x044e: init_gpc_wait */ 518 - 0x657e0008, 519 - 0xffc80000, 520 - 0xf90bf41f, 521 - 0x08044eb8, 522 - 0x00657e00, 523 - 0x001fbb00, 524 - 0x800040b7, 525 - 0xf40132b6, 526 - 0x000fb41b, 527 - 0x0006fd7e, 528 - 0xac7e000f, 529 - 0x00800006, 530 - 0x01f60201, 531 - 0xbd04bd00, 532 - 0x1f19f014, 533 - 0x02300080, 534 - 0xbd0001f6, 535 - /* 0x0491: wait */ 536 - 0x0028f404, 537 - /* 0x0497: main */ 538 - 0x0d0031f4, 539 - 0x00377e10, 540 - 0xf401f400, 541 - 0x4001e4b1, 542 - 0x00c71bf5, 543 - 0x99f094bd, 544 - 0x37008004, 545 - 0x0009f602, 546 - 0x008104bd, 547 - 0x11cf02c0, 548 - 0xc1008200, 549 - 0x0022cf02, 550 - 0xf41f13c8, 551 - 0x23c8770b, 552 - 0x550bf41f, 553 - 0x12b220f9, 554 - 0x99f094bd, 555 - 0x37008007, 556 - 0x0009f602, 557 - 0x32f404bd, 558 - 0x0231f401, 559 - 
0x0008807e, 560 - 0x99f094bd, 561 - 0x17008007, 562 - 0x0009f602, 563 - 0x20fc04bd, 564 - 0x99f094bd, 565 - 0x37008006, 566 - 0x0009f602, 567 - 0x31f404bd, 568 - 0x08807e01, 569 - 0xf094bd00, 570 - 0x00800699, 571 - 0x09f60217, 572 - 0xf404bd00, 573 - /* 0x0522: chsw_prev_no_next */ 574 - 0x20f92f0e, 575 - 0x32f412b2, 576 - 0x0232f401, 577 - 0x0008807e, 578 - 0x008020fc, 579 - 0x02f602c0, 580 - 0xf404bd00, 581 - /* 0x053e: chsw_no_prev */ 582 - 0x23c8130e, 583 - 0x0d0bf41f, 584 - 0xf40131f4, 585 - 0x807e0232, 586 - /* 0x054e: chsw_done */ 587 - 0x01020008, 588 - 0x02c30080, 463 + 0x87048204, 464 + 0x04004000, 589 465 0xbd0002f6, 590 - 0xf094bd04, 591 - 0x00800499, 592 - 0x09f60217, 593 - 0xf504bd00, 594 - /* 0x056b: main_not_ctx_switch */ 595 - 0xb0ff300e, 596 - 0x1bf401e4, 597 - 0x7ef2b20c, 598 - 0xf4000820, 599 - /* 0x057a: main_not_ctx_chan */ 600 - 0xe4b0400e, 601 - 0x2c1bf402, 602 - 0x99f094bd, 603 - 0x37008007, 604 - 0x0009f602, 605 - 0x32f404bd, 606 - 0x0232f401, 607 - 0x0008807e, 608 - 0x99f094bd, 609 - 0x17008007, 610 - 0x0009f602, 611 - 0x0ef404bd, 612 - /* 0x05a9: main_not_ctx_save */ 613 - 0x10ef9411, 614 - 0x7e01f5f0, 615 - 0xf50002f8, 616 - /* 0x05b7: main_done */ 617 - 0xbdfee40e, 618 - 0x1f29f024, 619 - 0x02300080, 620 - 0xbd0002f6, 621 - 0xd20ef504, 622 - /* 0x05c9: ih */ 623 - 0xf900f9fe, 624 - 0x0188fe80, 625 - 0x90f980f9, 626 - 0xb0f9a0f9, 627 - 0xe0f9d0f9, 628 - 0x04bdf0f9, 629 - 0xcf02004a, 630 - 0xabc400aa, 631 - 0x230bf404, 632 - 0x004e100d, 633 - 0x00eecf1a, 634 - 0xcf19004f, 635 - 0x047e00ff, 636 - 0xb0b70000, 637 - 0x010e0400, 638 - 0xf61d0040, 639 - 0x04bd000e, 640 - /* 0x060c: ih_no_fifo */ 641 - 0x0100abe4, 642 - 0x0d0c0bf4, 643 - 0x40014e10, 644 - 0x0000047e, 645 - /* 0x061c: ih_no_ctxsw */ 646 - 0x0400abe4, 647 - 0x8e560bf4, 648 - 0x7e400708, 649 - 0xb2000065, 650 - 0x040080ff, 651 - 0x000ff602, 652 - 0x048e04bd, 653 - 0x657e4007, 654 - 0xffb20000, 655 - 0x02030080, 656 - 0xbd000ff6, 657 - 0x50fec704, 658 - 0x8f02ee94, 659 - 
0xbb400700, 660 - 0x657e00ef, 661 - 0x00800000, 662 - 0x0ff60202, 663 - 0x0f04bd00, 664 - 0x02f87e03, 665 - 0x01004b00, 666 - 0x448ebfb2, 667 - 0x8f7e4001, 668 - /* 0x0676: ih_no_fwmthd */ 669 - 0x044b0000, 670 - 0xffb0bd05, 671 - 0x0bf4b4ab, 672 - 0x0700800c, 673 - 0x000bf603, 674 - /* 0x068a: ih_no_other */ 675 - 0x004004bd, 676 - 0x000af601, 677 - 0xf0fc04bd, 678 - 0xd0fce0fc, 679 - 0xa0fcb0fc, 680 - 0x80fc90fc, 681 - 0xfc0088fe, 682 - 0xf400fc80, 683 - 0x01f80032, 684 - /* 0x06ac: ctx_4170s */ 685 - 0xb210f5f0, 686 - 0x41708eff, 687 - 0x008f7e40, 688 - /* 0x06bb: ctx_4170w */ 689 - 0x8e00f800, 690 - 0x7e404170, 691 - 0xb2000065, 692 - 0x10f4f0ff, 693 - 0xf8f31bf4, 694 - /* 0x06cd: ctx_redswitch */ 695 - 0x02004e00, 696 - 0xf040e5f0, 697 - 0xe5f020e5, 698 - 0x85008010, 699 - 0x000ef601, 700 - 0x080f04bd, 701 - /* 0x06e4: ctx_redswitch_delay */ 702 - 0xf401f2b6, 703 - 0xe5f1fd1b, 704 - 0xe5f10400, 705 - 0x00800100, 706 - 0x0ef60185, 707 - 0xf804bd00, 708 - /* 0x06fd: ctx_86c */ 709 - 0x23008000, 710 - 0x000ff602, 711 - 0xffb204bd, 712 - 0x408a148e, 713 - 0x00008f7e, 714 - 0x8c8effb2, 715 - 0x8f7e41a8, 716 - 0x00f80000, 717 - /* 0x071c: ctx_mem */ 718 - 0x02840080, 719 - 0xbd000ff6, 720 - /* 0x0725: ctx_mem_wait */ 721 - 0x84008f04, 722 - 0x00ffcf02, 723 - 0xf405fffd, 724 - 0x00f8f61b, 725 - /* 0x0734: ctx_load */ 726 - 0x99f094bd, 727 - 0x37008005, 728 - 0x0009f602, 729 - 0x0c0a04bd, 730 - 0x0000b87e, 731 - 0x0080f4bd, 732 - 0x0ff60289, 466 + 0x40040204, 467 + 0x02f60300, 468 + 0xf404bd00, 469 + 0x048e1031, 470 + 0x657e4096, 471 + 0xfeb20000, 472 + 0xb590f1c7, 473 + 0xf4f00301, 474 + 0x020fb51f, 475 + 0x1fbb0101, 476 + 0x0112b604, 477 + 0x01030080, 478 + 0xbd0001f6, 479 + 0x04008004, 480 + 0x0001f601, 481 + 0x004104bd, 482 + 0x7e020f01, 483 + 0x7e0006ad, 484 + 0x0f0006bc, 485 + 0x06fe7e10, 486 + 0x000e9800, 487 + 0x7e010f98, 488 + 0x95000120, 489 + 0x00800814, 490 + 0x04f601c0, 733 491 0x8004bd00, 734 - 0xf602c100, 735 - 0x04bd0002, 736 - 0x02830080, 737 - 
0xbd0002f6, 738 - 0x7e070f04, 739 - 0x8000071c, 492 + 0xf601c100, 493 + 0x04bd0004, 494 + 0x130030b7, 495 + 0xb6001fbb, 496 + 0x008002f5, 497 + 0x0ff601d3, 498 + 0xb604bd00, 499 + 0x10b60815, 500 + 0x0814b601, 501 + 0x687e1fb2, 502 + 0x1fbb0002, 503 + 0x02039800, 504 + 0x50200084, 505 + /* 0x0420: init_gpc */ 506 + 0x08044eb8, 507 + 0x7e1fb200, 508 + 0xb800008f, 509 + 0x00010c4e, 510 + 0x8f7ef4bd, 511 + 0x4eb80000, 512 + 0x7e000104, 513 + 0xb800008f, 514 + 0x0001004e, 515 + 0x8f7e020f, 516 + 0x4eb80000, 517 + /* 0x044f: init_gpc_wait */ 518 + 0x7e000800, 519 + 0xc8000065, 520 + 0x0bf41fff, 521 + 0x044eb8f9, 522 + 0x657e0008, 523 + 0x1fbb0000, 524 + 0x0040b700, 525 + 0x0132b680, 526 + 0x0fb41bf4, 527 + 0x06fe7e00, 528 + 0x7e000f00, 529 + 0x800006ad, 530 + 0xf6020100, 531 + 0x04bd0001, 532 + 0x19f014bd, 533 + 0x3000801f, 534 + 0x0001f602, 535 + /* 0x0492: wait */ 536 + 0x28f404bd, 537 + 0x0031f400, 538 + /* 0x0498: main */ 539 + 0x377e100d, 540 + 0x01f40000, 541 + 0x01e4b1f4, 542 + 0xc71bf540, 543 + 0xf094bd00, 544 + 0x00800499, 545 + 0x09f60237, 546 + 0x8104bd00, 547 + 0xcf02c000, 548 + 0x00820011, 549 + 0x22cf02c1, 550 + 0x1f13c800, 551 + 0xc8770bf4, 552 + 0x0bf41f23, 553 + 0xb220f955, 554 + 0xf094bd12, 555 + 0x00800799, 556 + 0x09f60237, 557 + 0xf404bd00, 558 + 0x31f40132, 559 + 0x08817e02, 560 + 0xf094bd00, 561 + 0x00800799, 562 + 0x09f60217, 563 + 0xfc04bd00, 564 + 0xf094bd20, 565 + 0x00800699, 566 + 0x09f60237, 567 + 0xf404bd00, 568 + 0x817e0131, 569 + 0x94bd0008, 570 + 0x800699f0, 571 + 0xf6021700, 572 + 0x04bd0009, 573 + /* 0x0523: chsw_prev_no_next */ 574 + 0xf92f0ef4, 575 + 0xf412b220, 576 + 0x32f40132, 577 + 0x08817e02, 578 + 0x8020fc00, 740 579 0xf602c000, 741 580 0x04bd0002, 742 - 0xf0000bfe, 743 - 0x24b61f2a, 744 - 0x0220b604, 745 - 0x99f094bd, 746 - 0x37008008, 747 - 0x0009f602, 748 - 0x008004bd, 749 - 0x02f60281, 750 - 0xd204bd00, 751 - 0x80000000, 752 - 0x800225f0, 753 - 0xf6028800, 754 - 0x04bd0002, 755 - 0x00421001, 756 - 0x0223f002, 757 - 
0xf80512fa, 758 - 0xf094bd03, 759 - 0x00800899, 581 + /* 0x053f: chsw_no_prev */ 582 + 0xc8130ef4, 583 + 0x0bf41f23, 584 + 0x0131f40d, 585 + 0x7e0232f4, 586 + /* 0x054f: chsw_done */ 587 + 0x02000881, 588 + 0xc3008001, 589 + 0x0002f602, 590 + 0x94bd04bd, 591 + 0x800499f0, 592 + 0xf6021700, 593 + 0x04bd0009, 594 + 0xff300ef5, 595 + /* 0x056c: main_not_ctx_switch */ 596 + 0xf401e4b0, 597 + 0xf2b20c1b, 598 + 0x0008217e, 599 + /* 0x057b: main_not_ctx_chan */ 600 + 0xb0400ef4, 601 + 0x1bf402e4, 602 + 0xf094bd2c, 603 + 0x00800799, 604 + 0x09f60237, 605 + 0xf404bd00, 606 + 0x32f40132, 607 + 0x08817e02, 608 + 0xf094bd00, 609 + 0x00800799, 760 610 0x09f60217, 761 - 0x9804bd00, 762 - 0x14b68101, 763 - 0x80029818, 764 - 0xfd0825b6, 765 - 0x01b50512, 766 - 0xf094bd16, 767 - 0x00800999, 611 + 0xf404bd00, 612 + /* 0x05aa: main_not_ctx_save */ 613 + 0xef94110e, 614 + 0x01f5f010, 615 + 0x0002f87e, 616 + 0xfee40ef5, 617 + /* 0x05b8: main_done */ 618 + 0x29f024bd, 619 + 0x3000801f, 620 + 0x0002f602, 621 + 0x0ef504bd, 622 + /* 0x05ca: ih */ 623 + 0x00f9fed2, 624 + 0x88fe80f9, 625 + 0xf980f901, 626 + 0xf9a0f990, 627 + 0xf9d0f9b0, 628 + 0xbdf0f9e0, 629 + 0x02004a04, 630 + 0xc400aacf, 631 + 0x0bf404ab, 632 + 0x4e100d23, 633 + 0xeecf1a00, 634 + 0x19004f00, 635 + 0x7e00ffcf, 636 + 0xb7000004, 637 + 0x0e0400b0, 638 + 0x1d004001, 639 + 0xbd000ef6, 640 + /* 0x060d: ih_no_fifo */ 641 + 0x00abe404, 642 + 0x0c0bf401, 643 + 0x014e100d, 644 + 0x00047e40, 645 + /* 0x061d: ih_no_ctxsw */ 646 + 0x00abe400, 647 + 0x560bf404, 648 + 0x4007088e, 649 + 0x0000657e, 650 + 0x0080ffb2, 651 + 0x0ff60204, 652 + 0x8e04bd00, 653 + 0x7e400704, 654 + 0xb2000065, 655 + 0x030080ff, 656 + 0x000ff602, 657 + 0xfec704bd, 658 + 0x02ee9450, 659 + 0x4007008f, 660 + 0x7e00efbb, 661 + 0x80000065, 662 + 0xf6020200, 663 + 0x04bd000f, 664 + 0xf87e030f, 665 + 0x004b0002, 666 + 0x8ebfb201, 667 + 0x7e400144, 668 + /* 0x0677: ih_no_fwmthd */ 669 + 0x4b00008f, 670 + 0xb0bd0504, 671 + 0xf4b4abff, 672 + 0x00800c0b, 673 + 0x0bf60307, 
674 + /* 0x068b: ih_no_other */ 675 + 0x4004bd00, 676 + 0x0af60100, 677 + 0xfc04bd00, 678 + 0xfce0fcf0, 679 + 0xfcb0fcd0, 680 + 0xfc90fca0, 681 + 0x0088fe80, 682 + 0x00fc80fc, 683 + 0xf80032f4, 684 + /* 0x06ad: ctx_4170s */ 685 + 0x10f5f001, 686 + 0x708effb2, 687 + 0x8f7e4041, 688 + 0x00f80000, 689 + /* 0x06bc: ctx_4170w */ 690 + 0x4041708e, 691 + 0x0000657e, 692 + 0xf4f0ffb2, 693 + 0xf31bf410, 694 + /* 0x06ce: ctx_redswitch */ 695 + 0x004e00f8, 696 + 0x40e5f002, 697 + 0xf020e5f0, 698 + 0x008010e5, 699 + 0x0ef60185, 700 + 0x0f04bd00, 701 + /* 0x06e5: ctx_redswitch_delay */ 702 + 0x01f2b608, 703 + 0xf1fd1bf4, 704 + 0xf10400e5, 705 + 0x800100e5, 706 + 0xf6018500, 707 + 0x04bd000e, 708 + /* 0x06fe: ctx_86c */ 709 + 0x008000f8, 710 + 0x0ff60223, 711 + 0xb204bd00, 712 + 0x8a148eff, 713 + 0x008f7e40, 714 + 0x8effb200, 715 + 0x7e41a88c, 716 + 0xf800008f, 717 + /* 0x071d: ctx_mem */ 718 + 0x84008000, 719 + 0x000ff602, 720 + /* 0x0726: ctx_mem_wait */ 721 + 0x008f04bd, 722 + 0xffcf0284, 723 + 0x05fffd00, 724 + 0xf8f61bf4, 725 + /* 0x0735: ctx_load */ 726 + 0xf094bd00, 727 + 0x00800599, 728 + 0x09f60237, 729 + 0x0a04bd00, 730 + 0x00b87e0c, 731 + 0x80f4bd00, 732 + 0xf6028900, 733 + 0x04bd000f, 734 + 0x02c10080, 735 + 0xbd0002f6, 736 + 0x83008004, 737 + 0x0002f602, 738 + 0x070f04bd, 739 + 0x00071d7e, 740 + 0x02c00080, 741 + 0xbd0002f6, 742 + 0x000bfe04, 743 + 0xb61f2af0, 744 + 0x20b60424, 745 + 0xf094bd02, 746 + 0x00800899, 768 747 0x09f60237, 769 748 0x8004bd00, 770 749 0xf6028100, 771 - 0x04bd0001, 772 - 0x00800102, 773 - 0x02f60288, 774 - 0x4104bd00, 775 - 0x13f00100, 776 - 0x0501fa06, 750 + 0x04bd0002, 751 + 0x000000d2, 752 + 0x0225f080, 753 + 0x02880080, 754 + 0xbd0002f6, 755 + 0x42100104, 756 + 0x23f00200, 757 + 0x0512fa02, 777 758 0x94bd03f8, 778 - 0x800999f0, 759 + 0x800899f0, 779 760 0xf6021700, 780 761 0x04bd0009, 781 - 0x99f094bd, 782 - 0x17008005, 783 - 0x0009f602, 784 - 0x00f804bd, 785 - /* 0x0820: ctx_chan */ 786 - 0x0007347e, 787 - 0xb87e0c0a, 788 - 0x050f0000, 
789 - 0x00071c7e, 790 - /* 0x0832: ctx_mmio_exec */ 791 - 0x039800f8, 792 - 0x81008041, 793 - 0x0003f602, 794 - 0x34bd04bd, 795 - /* 0x0840: ctx_mmio_loop */ 796 - 0xf4ff34c4, 797 - 0x00450e1b, 798 - 0x0653f002, 799 - 0xf80535fa, 800 - /* 0x0851: ctx_mmio_pull */ 801 - 0x804e9803, 802 - 0x7e814f98, 803 - 0xb600008f, 804 - 0x12b60830, 805 - 0xdf1bf401, 806 - /* 0x0864: ctx_mmio_done */ 807 - 0x80160398, 808 - 0xf6028100, 809 - 0x04bd0003, 810 - 0x414000b5, 811 - 0x13f00100, 812 - 0x0601fa06, 813 - 0x00f803f8, 814 - /* 0x0880: ctx_xfer */ 815 - 0x0080040e, 816 - 0x0ef60302, 817 - /* 0x088b: ctx_xfer_idle */ 818 - 0x8e04bd00, 819 - 0xcf030000, 820 - 0xe4f100ee, 821 - 0x1bf42000, 822 - 0x0611f4f5, 823 - /* 0x089f: ctx_xfer_pre */ 824 - 0x0f0c02f4, 825 - 0x06fd7e10, 826 - 0x1b11f400, 827 - /* 0x08a8: ctx_xfer_pre_load */ 828 - 0xac7e020f, 829 - 0xbb7e0006, 830 - 0xcd7e0006, 831 - 0xf4bd0006, 832 - 0x0006ac7e, 833 - 0x0007347e, 834 - /* 0x08c0: ctx_xfer_exec */ 835 - 0xbd160198, 836 - 0x05008024, 837 - 0x0002f601, 838 - 0x1fb204bd, 839 - 0x41a5008e, 840 - 0x00008f7e, 841 - 0xf001fcf0, 842 - 0x24b6022c, 843 - 0x05f2fd01, 844 - 0x048effb2, 845 - 0x8f7e41a5, 846 - 0x167e0000, 847 - 0x24bd0002, 848 - 0x0247fc80, 849 - 0xbd0002f6, 850 - 0x012cf004, 851 - 0x800320b6, 852 - 0xf6024afc, 762 + 0xb6810198, 763 + 0x02981814, 764 + 0x0825b680, 765 + 0xb50512fd, 766 + 0x94bd1601, 767 + 0x800999f0, 768 + 0xf6023700, 769 + 0x04bd0009, 770 + 0x02810080, 771 + 0xbd0001f6, 772 + 0x80010204, 773 + 0xf6028800, 853 774 0x04bd0002, 854 - 0xf001acf0, 855 - 0x000b06a5, 856 - 0x98000c98, 857 - 0x000e010d, 858 - 0x00013d7e, 859 - 0xec7e080a, 860 - 0x0a7e0000, 861 - 0x01f40002, 862 - 0x7e0c0a12, 775 + 0xf0010041, 776 + 0x01fa0613, 777 + 0xbd03f805, 778 + 0x0999f094, 779 + 0x02170080, 780 + 0xbd0009f6, 781 + 0xf094bd04, 782 + 0x00800599, 783 + 0x09f60217, 784 + 0xf804bd00, 785 + /* 0x0821: ctx_chan */ 786 + 0x07357e00, 787 + 0x7e0c0a00, 863 788 0x0f0000b8, 864 - 0x071c7e05, 865 - 0x2d02f400, 866 - 
/* 0x093c: ctx_xfer_post */ 867 - 0xac7e020f, 868 - 0xf4bd0006, 869 - 0x0006fd7e, 870 - 0x0002277e, 871 - 0x0006bb7e, 872 - 0xac7ef4bd, 789 + 0x071d7e05, 790 + /* 0x0833: ctx_mmio_exec */ 791 + 0x9800f800, 792 + 0x00804103, 793 + 0x03f60281, 794 + 0xbd04bd00, 795 + /* 0x0841: ctx_mmio_loop */ 796 + 0xff34c434, 797 + 0x450e1bf4, 798 + 0x53f00200, 799 + 0x0535fa06, 800 + /* 0x0852: ctx_mmio_pull */ 801 + 0x4e9803f8, 802 + 0x814f9880, 803 + 0x00008f7e, 804 + 0xb60830b6, 805 + 0x1bf40112, 806 + /* 0x0865: ctx_mmio_done */ 807 + 0x160398df, 808 + 0x02810080, 809 + 0xbd0003f6, 810 + 0x4000b504, 811 + 0xf0010041, 812 + 0x01fa0613, 813 + 0xf803f806, 814 + /* 0x0881: ctx_xfer */ 815 + 0x80040e00, 816 + 0xf6030200, 817 + 0x04bd000e, 818 + /* 0x088c: ctx_xfer_idle */ 819 + 0x0300008e, 820 + 0xf100eecf, 821 + 0xf42000e4, 822 + 0x11f4f51b, 823 + 0x0c02f406, 824 + /* 0x08a0: ctx_xfer_pre */ 825 + 0xfe7e100f, 873 826 0x11f40006, 874 - 0x40019810, 875 - 0xf40511fd, 876 - 0x327e070b, 877 - /* 0x0966: ctx_xfer_no_post_mmio */ 878 - /* 0x0966: ctx_xfer_done */ 879 - 0x00f80008, 827 + /* 0x08a9: ctx_xfer_pre_load */ 828 + 0x7e020f1b, 829 + 0x7e0006ad, 830 + 0x7e0006bc, 831 + 0xbd0006ce, 832 + 0x06ad7ef4, 833 + 0x07357e00, 834 + /* 0x08c1: ctx_xfer_exec */ 835 + 0x16019800, 836 + 0x008024bd, 837 + 0x02f60105, 838 + 0xb204bd00, 839 + 0xa5008e1f, 840 + 0x008f7e41, 841 + 0x01fcf000, 842 + 0xb6022cf0, 843 + 0xf2fd0124, 844 + 0x8effb205, 845 + 0x7e41a504, 846 + 0x7e00008f, 847 + 0xbd000216, 848 + 0x47fc8024, 849 + 0x0002f602, 850 + 0x2cf004bd, 851 + 0x0320b601, 852 + 0x024afc80, 853 + 0xbd0002f6, 854 + 0x01acf004, 855 + 0x0b06a5f0, 856 + 0x000c9800, 857 + 0x0e010d98, 858 + 0x013d7e00, 859 + 0x7e080a00, 860 + 0x7e0000ec, 861 + 0xf400020a, 862 + 0x0c0a1201, 863 + 0x0000b87e, 864 + 0x1d7e050f, 865 + 0x02f40007, 866 + /* 0x093d: ctx_xfer_post */ 867 + 0x7e020f2d, 868 + 0xbd0006ad, 869 + 0x06fe7ef4, 870 + 0x02277e00, 871 + 0x06bc7e00, 872 + 0x7ef4bd00, 873 + 0xf40006ad, 874 + 0x01981011, 875 + 
0x0511fd40, 876 + 0x7e070bf4, 877 + /* 0x0967: ctx_xfer_no_post_mmio */ 878 + /* 0x0967: ctx_xfer_done */ 879 + 0xf8000833, 880 880 0x00000000, 881 881 0x00000000, 882 882 0x00000000,
+404 -404
drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
··· 441 441 0x020014fe, 442 442 0x12004002, 443 443 0xbd0002f6, 444 - 0x05c94104, 444 + 0x05ca4104, 445 445 0xbd0010fe, 446 446 0x07004024, 447 447 0xbd0002f6, ··· 460 460 0x01039204, 461 461 0x03090080, 462 462 0xbd0003f6, 463 - 0x87044204, 464 - 0xf6040040, 465 - 0x04bd0002, 466 - 0x00400402, 467 - 0x0002f603, 468 - 0x31f404bd, 469 - 0x96048e10, 470 - 0x00657e40, 471 - 0xc7feb200, 472 - 0x01b590f1, 473 - 0x1ff4f003, 474 - 0x01020fb5, 475 - 0x041fbb01, 476 - 0x800112b6, 477 - 0xf6010300, 478 - 0x04bd0001, 479 - 0x01040080, 480 - 0xbd0001f6, 481 - 0x01004104, 482 - 0xac7e020f, 483 - 0xbb7e0006, 484 - 0x100f0006, 485 - 0x0006fd7e, 486 - 0x98000e98, 487 - 0x207e010f, 488 - 0x14950001, 489 - 0xc0008008, 490 - 0x0004f601, 491 - 0x008004bd, 492 - 0x04f601c1, 493 - 0xb704bd00, 494 - 0xbb130030, 495 - 0xf5b6001f, 496 - 0xd3008002, 497 - 0x000ff601, 498 - 0x15b604bd, 499 - 0x0110b608, 500 - 0xb20814b6, 501 - 0x02687e1f, 502 - 0x001fbb00, 503 - 0x84020398, 504 - /* 0x041f: init_gpc */ 505 - 0xb8502000, 506 - 0x0008044e, 507 - 0x8f7e1fb2, 508 - 0x4eb80000, 509 - 0xbd00010c, 510 - 0x008f7ef4, 511 - 0x044eb800, 512 - 0x8f7e0001, 513 - 0x4eb80000, 514 - 0x0f000100, 515 - 0x008f7e02, 516 - 0x004eb800, 517 - /* 0x044e: init_gpc_wait */ 518 - 0x657e0008, 519 - 0xffc80000, 520 - 0xf90bf41f, 521 - 0x08044eb8, 522 - 0x00657e00, 523 - 0x001fbb00, 524 - 0x800040b7, 525 - 0xf40132b6, 526 - 0x000fb41b, 527 - 0x0006fd7e, 528 - 0xac7e000f, 529 - 0x00800006, 530 - 0x01f60201, 531 - 0xbd04bd00, 532 - 0x1f19f014, 533 - 0x02300080, 534 - 0xbd0001f6, 535 - /* 0x0491: wait */ 536 - 0x0028f404, 537 - /* 0x0497: main */ 538 - 0x0d0031f4, 539 - 0x00377e10, 540 - 0xf401f400, 541 - 0x4001e4b1, 542 - 0x00c71bf5, 543 - 0x99f094bd, 544 - 0x37008004, 545 - 0x0009f602, 546 - 0x008104bd, 547 - 0x11cf02c0, 548 - 0xc1008200, 549 - 0x0022cf02, 550 - 0xf41f13c8, 551 - 0x23c8770b, 552 - 0x550bf41f, 553 - 0x12b220f9, 554 - 0x99f094bd, 555 - 0x37008007, 556 - 0x0009f602, 557 - 0x32f404bd, 558 - 0x0231f401, 559 - 
0x0008807e, 560 - 0x99f094bd, 561 - 0x17008007, 562 - 0x0009f602, 563 - 0x20fc04bd, 564 - 0x99f094bd, 565 - 0x37008006, 566 - 0x0009f602, 567 - 0x31f404bd, 568 - 0x08807e01, 569 - 0xf094bd00, 570 - 0x00800699, 571 - 0x09f60217, 572 - 0xf404bd00, 573 - /* 0x0522: chsw_prev_no_next */ 574 - 0x20f92f0e, 575 - 0x32f412b2, 576 - 0x0232f401, 577 - 0x0008807e, 578 - 0x008020fc, 579 - 0x02f602c0, 580 - 0xf404bd00, 581 - /* 0x053e: chsw_no_prev */ 582 - 0x23c8130e, 583 - 0x0d0bf41f, 584 - 0xf40131f4, 585 - 0x807e0232, 586 - /* 0x054e: chsw_done */ 587 - 0x01020008, 588 - 0x02c30080, 463 + 0x87048204, 464 + 0x04004000, 589 465 0xbd0002f6, 590 - 0xf094bd04, 591 - 0x00800499, 592 - 0x09f60217, 593 - 0xf504bd00, 594 - /* 0x056b: main_not_ctx_switch */ 595 - 0xb0ff300e, 596 - 0x1bf401e4, 597 - 0x7ef2b20c, 598 - 0xf4000820, 599 - /* 0x057a: main_not_ctx_chan */ 600 - 0xe4b0400e, 601 - 0x2c1bf402, 602 - 0x99f094bd, 603 - 0x37008007, 604 - 0x0009f602, 605 - 0x32f404bd, 606 - 0x0232f401, 607 - 0x0008807e, 608 - 0x99f094bd, 609 - 0x17008007, 610 - 0x0009f602, 611 - 0x0ef404bd, 612 - /* 0x05a9: main_not_ctx_save */ 613 - 0x10ef9411, 614 - 0x7e01f5f0, 615 - 0xf50002f8, 616 - /* 0x05b7: main_done */ 617 - 0xbdfee40e, 618 - 0x1f29f024, 619 - 0x02300080, 620 - 0xbd0002f6, 621 - 0xd20ef504, 622 - /* 0x05c9: ih */ 623 - 0xf900f9fe, 624 - 0x0188fe80, 625 - 0x90f980f9, 626 - 0xb0f9a0f9, 627 - 0xe0f9d0f9, 628 - 0x04bdf0f9, 629 - 0xcf02004a, 630 - 0xabc400aa, 631 - 0x230bf404, 632 - 0x004e100d, 633 - 0x00eecf1a, 634 - 0xcf19004f, 635 - 0x047e00ff, 636 - 0xb0b70000, 637 - 0x010e0400, 638 - 0xf61d0040, 639 - 0x04bd000e, 640 - /* 0x060c: ih_no_fifo */ 641 - 0x0100abe4, 642 - 0x0d0c0bf4, 643 - 0x40014e10, 644 - 0x0000047e, 645 - /* 0x061c: ih_no_ctxsw */ 646 - 0x0400abe4, 647 - 0x8e560bf4, 648 - 0x7e400708, 649 - 0xb2000065, 650 - 0x040080ff, 651 - 0x000ff602, 652 - 0x048e04bd, 653 - 0x657e4007, 654 - 0xffb20000, 655 - 0x02030080, 656 - 0xbd000ff6, 657 - 0x50fec704, 658 - 0x8f02ee94, 659 - 
0xbb400700, 660 - 0x657e00ef, 661 - 0x00800000, 662 - 0x0ff60202, 663 - 0x0f04bd00, 664 - 0x02f87e03, 665 - 0x01004b00, 666 - 0x448ebfb2, 667 - 0x8f7e4001, 668 - /* 0x0676: ih_no_fwmthd */ 669 - 0x044b0000, 670 - 0xffb0bd05, 671 - 0x0bf4b4ab, 672 - 0x0700800c, 673 - 0x000bf603, 674 - /* 0x068a: ih_no_other */ 675 - 0x004004bd, 676 - 0x000af601, 677 - 0xf0fc04bd, 678 - 0xd0fce0fc, 679 - 0xa0fcb0fc, 680 - 0x80fc90fc, 681 - 0xfc0088fe, 682 - 0xf400fc80, 683 - 0x01f80032, 684 - /* 0x06ac: ctx_4170s */ 685 - 0xb210f5f0, 686 - 0x41708eff, 687 - 0x008f7e40, 688 - /* 0x06bb: ctx_4170w */ 689 - 0x8e00f800, 690 - 0x7e404170, 691 - 0xb2000065, 692 - 0x10f4f0ff, 693 - 0xf8f31bf4, 694 - /* 0x06cd: ctx_redswitch */ 695 - 0x02004e00, 696 - 0xf040e5f0, 697 - 0xe5f020e5, 698 - 0x85008010, 699 - 0x000ef601, 700 - 0x080f04bd, 701 - /* 0x06e4: ctx_redswitch_delay */ 702 - 0xf401f2b6, 703 - 0xe5f1fd1b, 704 - 0xe5f10400, 705 - 0x00800100, 706 - 0x0ef60185, 707 - 0xf804bd00, 708 - /* 0x06fd: ctx_86c */ 709 - 0x23008000, 710 - 0x000ff602, 711 - 0xffb204bd, 712 - 0x408a148e, 713 - 0x00008f7e, 714 - 0x8c8effb2, 715 - 0x8f7e41a8, 716 - 0x00f80000, 717 - /* 0x071c: ctx_mem */ 718 - 0x02840080, 719 - 0xbd000ff6, 720 - /* 0x0725: ctx_mem_wait */ 721 - 0x84008f04, 722 - 0x00ffcf02, 723 - 0xf405fffd, 724 - 0x00f8f61b, 725 - /* 0x0734: ctx_load */ 726 - 0x99f094bd, 727 - 0x37008005, 728 - 0x0009f602, 729 - 0x0c0a04bd, 730 - 0x0000b87e, 731 - 0x0080f4bd, 732 - 0x0ff60289, 466 + 0x40040204, 467 + 0x02f60300, 468 + 0xf404bd00, 469 + 0x048e1031, 470 + 0x657e4096, 471 + 0xfeb20000, 472 + 0xb590f1c7, 473 + 0xf4f00301, 474 + 0x020fb51f, 475 + 0x1fbb0101, 476 + 0x0112b604, 477 + 0x01030080, 478 + 0xbd0001f6, 479 + 0x04008004, 480 + 0x0001f601, 481 + 0x004104bd, 482 + 0x7e020f01, 483 + 0x7e0006ad, 484 + 0x0f0006bc, 485 + 0x06fe7e10, 486 + 0x000e9800, 487 + 0x7e010f98, 488 + 0x95000120, 489 + 0x00800814, 490 + 0x04f601c0, 733 491 0x8004bd00, 734 - 0xf602c100, 735 - 0x04bd0002, 736 - 0x02830080, 737 - 
0xbd0002f6, 738 - 0x7e070f04, 739 - 0x8000071c, 492 + 0xf601c100, 493 + 0x04bd0004, 494 + 0x130030b7, 495 + 0xb6001fbb, 496 + 0x008002f5, 497 + 0x0ff601d3, 498 + 0xb604bd00, 499 + 0x10b60815, 500 + 0x0814b601, 501 + 0x687e1fb2, 502 + 0x1fbb0002, 503 + 0x02039800, 504 + 0x50200084, 505 + /* 0x0420: init_gpc */ 506 + 0x08044eb8, 507 + 0x7e1fb200, 508 + 0xb800008f, 509 + 0x00010c4e, 510 + 0x8f7ef4bd, 511 + 0x4eb80000, 512 + 0x7e000104, 513 + 0xb800008f, 514 + 0x0001004e, 515 + 0x8f7e020f, 516 + 0x4eb80000, 517 + /* 0x044f: init_gpc_wait */ 518 + 0x7e000800, 519 + 0xc8000065, 520 + 0x0bf41fff, 521 + 0x044eb8f9, 522 + 0x657e0008, 523 + 0x1fbb0000, 524 + 0x0040b700, 525 + 0x0132b680, 526 + 0x0fb41bf4, 527 + 0x06fe7e00, 528 + 0x7e000f00, 529 + 0x800006ad, 530 + 0xf6020100, 531 + 0x04bd0001, 532 + 0x19f014bd, 533 + 0x3000801f, 534 + 0x0001f602, 535 + /* 0x0492: wait */ 536 + 0x28f404bd, 537 + 0x0031f400, 538 + /* 0x0498: main */ 539 + 0x377e100d, 540 + 0x01f40000, 541 + 0x01e4b1f4, 542 + 0xc71bf540, 543 + 0xf094bd00, 544 + 0x00800499, 545 + 0x09f60237, 546 + 0x8104bd00, 547 + 0xcf02c000, 548 + 0x00820011, 549 + 0x22cf02c1, 550 + 0x1f13c800, 551 + 0xc8770bf4, 552 + 0x0bf41f23, 553 + 0xb220f955, 554 + 0xf094bd12, 555 + 0x00800799, 556 + 0x09f60237, 557 + 0xf404bd00, 558 + 0x31f40132, 559 + 0x08817e02, 560 + 0xf094bd00, 561 + 0x00800799, 562 + 0x09f60217, 563 + 0xfc04bd00, 564 + 0xf094bd20, 565 + 0x00800699, 566 + 0x09f60237, 567 + 0xf404bd00, 568 + 0x817e0131, 569 + 0x94bd0008, 570 + 0x800699f0, 571 + 0xf6021700, 572 + 0x04bd0009, 573 + /* 0x0523: chsw_prev_no_next */ 574 + 0xf92f0ef4, 575 + 0xf412b220, 576 + 0x32f40132, 577 + 0x08817e02, 578 + 0x8020fc00, 740 579 0xf602c000, 741 580 0x04bd0002, 742 - 0xf0000bfe, 743 - 0x24b61f2a, 744 - 0x0220b604, 745 - 0x99f094bd, 746 - 0x37008008, 747 - 0x0009f602, 748 - 0x008004bd, 749 - 0x02f60281, 750 - 0xd204bd00, 751 - 0x80000000, 752 - 0x800225f0, 753 - 0xf6028800, 754 - 0x04bd0002, 755 - 0x00421001, 756 - 0x0223f002, 757 - 
0xf80512fa, 758 - 0xf094bd03, 759 - 0x00800899, 581 + /* 0x053f: chsw_no_prev */ 582 + 0xc8130ef4, 583 + 0x0bf41f23, 584 + 0x0131f40d, 585 + 0x7e0232f4, 586 + /* 0x054f: chsw_done */ 587 + 0x02000881, 588 + 0xc3008001, 589 + 0x0002f602, 590 + 0x94bd04bd, 591 + 0x800499f0, 592 + 0xf6021700, 593 + 0x04bd0009, 594 + 0xff300ef5, 595 + /* 0x056c: main_not_ctx_switch */ 596 + 0xf401e4b0, 597 + 0xf2b20c1b, 598 + 0x0008217e, 599 + /* 0x057b: main_not_ctx_chan */ 600 + 0xb0400ef4, 601 + 0x1bf402e4, 602 + 0xf094bd2c, 603 + 0x00800799, 604 + 0x09f60237, 605 + 0xf404bd00, 606 + 0x32f40132, 607 + 0x08817e02, 608 + 0xf094bd00, 609 + 0x00800799, 760 610 0x09f60217, 761 - 0x9804bd00, 762 - 0x14b68101, 763 - 0x80029818, 764 - 0xfd0825b6, 765 - 0x01b50512, 766 - 0xf094bd16, 767 - 0x00800999, 611 + 0xf404bd00, 612 + /* 0x05aa: main_not_ctx_save */ 613 + 0xef94110e, 614 + 0x01f5f010, 615 + 0x0002f87e, 616 + 0xfee40ef5, 617 + /* 0x05b8: main_done */ 618 + 0x29f024bd, 619 + 0x3000801f, 620 + 0x0002f602, 621 + 0x0ef504bd, 622 + /* 0x05ca: ih */ 623 + 0x00f9fed2, 624 + 0x88fe80f9, 625 + 0xf980f901, 626 + 0xf9a0f990, 627 + 0xf9d0f9b0, 628 + 0xbdf0f9e0, 629 + 0x02004a04, 630 + 0xc400aacf, 631 + 0x0bf404ab, 632 + 0x4e100d23, 633 + 0xeecf1a00, 634 + 0x19004f00, 635 + 0x7e00ffcf, 636 + 0xb7000004, 637 + 0x0e0400b0, 638 + 0x1d004001, 639 + 0xbd000ef6, 640 + /* 0x060d: ih_no_fifo */ 641 + 0x00abe404, 642 + 0x0c0bf401, 643 + 0x014e100d, 644 + 0x00047e40, 645 + /* 0x061d: ih_no_ctxsw */ 646 + 0x00abe400, 647 + 0x560bf404, 648 + 0x4007088e, 649 + 0x0000657e, 650 + 0x0080ffb2, 651 + 0x0ff60204, 652 + 0x8e04bd00, 653 + 0x7e400704, 654 + 0xb2000065, 655 + 0x030080ff, 656 + 0x000ff602, 657 + 0xfec704bd, 658 + 0x02ee9450, 659 + 0x4007008f, 660 + 0x7e00efbb, 661 + 0x80000065, 662 + 0xf6020200, 663 + 0x04bd000f, 664 + 0xf87e030f, 665 + 0x004b0002, 666 + 0x8ebfb201, 667 + 0x7e400144, 668 + /* 0x0677: ih_no_fwmthd */ 669 + 0x4b00008f, 670 + 0xb0bd0504, 671 + 0xf4b4abff, 672 + 0x00800c0b, 673 + 0x0bf60307, 
674 + /* 0x068b: ih_no_other */ 675 + 0x4004bd00, 676 + 0x0af60100, 677 + 0xfc04bd00, 678 + 0xfce0fcf0, 679 + 0xfcb0fcd0, 680 + 0xfc90fca0, 681 + 0x0088fe80, 682 + 0x00fc80fc, 683 + 0xf80032f4, 684 + /* 0x06ad: ctx_4170s */ 685 + 0x10f5f001, 686 + 0x708effb2, 687 + 0x8f7e4041, 688 + 0x00f80000, 689 + /* 0x06bc: ctx_4170w */ 690 + 0x4041708e, 691 + 0x0000657e, 692 + 0xf4f0ffb2, 693 + 0xf31bf410, 694 + /* 0x06ce: ctx_redswitch */ 695 + 0x004e00f8, 696 + 0x40e5f002, 697 + 0xf020e5f0, 698 + 0x008010e5, 699 + 0x0ef60185, 700 + 0x0f04bd00, 701 + /* 0x06e5: ctx_redswitch_delay */ 702 + 0x01f2b608, 703 + 0xf1fd1bf4, 704 + 0xf10400e5, 705 + 0x800100e5, 706 + 0xf6018500, 707 + 0x04bd000e, 708 + /* 0x06fe: ctx_86c */ 709 + 0x008000f8, 710 + 0x0ff60223, 711 + 0xb204bd00, 712 + 0x8a148eff, 713 + 0x008f7e40, 714 + 0x8effb200, 715 + 0x7e41a88c, 716 + 0xf800008f, 717 + /* 0x071d: ctx_mem */ 718 + 0x84008000, 719 + 0x000ff602, 720 + /* 0x0726: ctx_mem_wait */ 721 + 0x008f04bd, 722 + 0xffcf0284, 723 + 0x05fffd00, 724 + 0xf8f61bf4, 725 + /* 0x0735: ctx_load */ 726 + 0xf094bd00, 727 + 0x00800599, 728 + 0x09f60237, 729 + 0x0a04bd00, 730 + 0x00b87e0c, 731 + 0x80f4bd00, 732 + 0xf6028900, 733 + 0x04bd000f, 734 + 0x02c10080, 735 + 0xbd0002f6, 736 + 0x83008004, 737 + 0x0002f602, 738 + 0x070f04bd, 739 + 0x00071d7e, 740 + 0x02c00080, 741 + 0xbd0002f6, 742 + 0x000bfe04, 743 + 0xb61f2af0, 744 + 0x20b60424, 745 + 0xf094bd02, 746 + 0x00800899, 768 747 0x09f60237, 769 748 0x8004bd00, 770 749 0xf6028100, 771 - 0x04bd0001, 772 - 0x00800102, 773 - 0x02f60288, 774 - 0x4104bd00, 775 - 0x13f00100, 776 - 0x0501fa06, 750 + 0x04bd0002, 751 + 0x000000d2, 752 + 0x0225f080, 753 + 0x02880080, 754 + 0xbd0002f6, 755 + 0x42100104, 756 + 0x23f00200, 757 + 0x0512fa02, 777 758 0x94bd03f8, 778 - 0x800999f0, 759 + 0x800899f0, 779 760 0xf6021700, 780 761 0x04bd0009, 781 - 0x99f094bd, 782 - 0x17008005, 783 - 0x0009f602, 784 - 0x00f804bd, 785 - /* 0x0820: ctx_chan */ 786 - 0x0007347e, 787 - 0xb87e0c0a, 788 - 0x050f0000, 
789 - 0x00071c7e, 790 - /* 0x0832: ctx_mmio_exec */ 791 - 0x039800f8, 792 - 0x81008041, 793 - 0x0003f602, 794 - 0x34bd04bd, 795 - /* 0x0840: ctx_mmio_loop */ 796 - 0xf4ff34c4, 797 - 0x00450e1b, 798 - 0x0653f002, 799 - 0xf80535fa, 800 - /* 0x0851: ctx_mmio_pull */ 801 - 0x804e9803, 802 - 0x7e814f98, 803 - 0xb600008f, 804 - 0x12b60830, 805 - 0xdf1bf401, 806 - /* 0x0864: ctx_mmio_done */ 807 - 0x80160398, 808 - 0xf6028100, 809 - 0x04bd0003, 810 - 0x414000b5, 811 - 0x13f00100, 812 - 0x0601fa06, 813 - 0x00f803f8, 814 - /* 0x0880: ctx_xfer */ 815 - 0x0080040e, 816 - 0x0ef60302, 817 - /* 0x088b: ctx_xfer_idle */ 818 - 0x8e04bd00, 819 - 0xcf030000, 820 - 0xe4f100ee, 821 - 0x1bf42000, 822 - 0x0611f4f5, 823 - /* 0x089f: ctx_xfer_pre */ 824 - 0x0f0c02f4, 825 - 0x06fd7e10, 826 - 0x1b11f400, 827 - /* 0x08a8: ctx_xfer_pre_load */ 828 - 0xac7e020f, 829 - 0xbb7e0006, 830 - 0xcd7e0006, 831 - 0xf4bd0006, 832 - 0x0006ac7e, 833 - 0x0007347e, 834 - /* 0x08c0: ctx_xfer_exec */ 835 - 0xbd160198, 836 - 0x05008024, 837 - 0x0002f601, 838 - 0x1fb204bd, 839 - 0x41a5008e, 840 - 0x00008f7e, 841 - 0xf001fcf0, 842 - 0x24b6022c, 843 - 0x05f2fd01, 844 - 0x048effb2, 845 - 0x8f7e41a5, 846 - 0x167e0000, 847 - 0x24bd0002, 848 - 0x0247fc80, 849 - 0xbd0002f6, 850 - 0x012cf004, 851 - 0x800320b6, 852 - 0xf6024afc, 762 + 0xb6810198, 763 + 0x02981814, 764 + 0x0825b680, 765 + 0xb50512fd, 766 + 0x94bd1601, 767 + 0x800999f0, 768 + 0xf6023700, 769 + 0x04bd0009, 770 + 0x02810080, 771 + 0xbd0001f6, 772 + 0x80010204, 773 + 0xf6028800, 853 774 0x04bd0002, 854 - 0xf001acf0, 855 - 0x000b06a5, 856 - 0x98000c98, 857 - 0x000e010d, 858 - 0x00013d7e, 859 - 0xec7e080a, 860 - 0x0a7e0000, 861 - 0x01f40002, 862 - 0x7e0c0a12, 775 + 0xf0010041, 776 + 0x01fa0613, 777 + 0xbd03f805, 778 + 0x0999f094, 779 + 0x02170080, 780 + 0xbd0009f6, 781 + 0xf094bd04, 782 + 0x00800599, 783 + 0x09f60217, 784 + 0xf804bd00, 785 + /* 0x0821: ctx_chan */ 786 + 0x07357e00, 787 + 0x7e0c0a00, 863 788 0x0f0000b8, 864 - 0x071c7e05, 865 - 0x2d02f400, 866 - 
/* 0x093c: ctx_xfer_post */ 867 - 0xac7e020f, 868 - 0xf4bd0006, 869 - 0x0006fd7e, 870 - 0x0002277e, 871 - 0x0006bb7e, 872 - 0xac7ef4bd, 789 + 0x071d7e05, 790 + /* 0x0833: ctx_mmio_exec */ 791 + 0x9800f800, 792 + 0x00804103, 793 + 0x03f60281, 794 + 0xbd04bd00, 795 + /* 0x0841: ctx_mmio_loop */ 796 + 0xff34c434, 797 + 0x450e1bf4, 798 + 0x53f00200, 799 + 0x0535fa06, 800 + /* 0x0852: ctx_mmio_pull */ 801 + 0x4e9803f8, 802 + 0x814f9880, 803 + 0x00008f7e, 804 + 0xb60830b6, 805 + 0x1bf40112, 806 + /* 0x0865: ctx_mmio_done */ 807 + 0x160398df, 808 + 0x02810080, 809 + 0xbd0003f6, 810 + 0x4000b504, 811 + 0xf0010041, 812 + 0x01fa0613, 813 + 0xf803f806, 814 + /* 0x0881: ctx_xfer */ 815 + 0x80040e00, 816 + 0xf6030200, 817 + 0x04bd000e, 818 + /* 0x088c: ctx_xfer_idle */ 819 + 0x0300008e, 820 + 0xf100eecf, 821 + 0xf42000e4, 822 + 0x11f4f51b, 823 + 0x0c02f406, 824 + /* 0x08a0: ctx_xfer_pre */ 825 + 0xfe7e100f, 873 826 0x11f40006, 874 - 0x40019810, 875 - 0xf40511fd, 876 - 0x327e070b, 877 - /* 0x0966: ctx_xfer_no_post_mmio */ 878 - /* 0x0966: ctx_xfer_done */ 879 - 0x00f80008, 827 + /* 0x08a9: ctx_xfer_pre_load */ 828 + 0x7e020f1b, 829 + 0x7e0006ad, 830 + 0x7e0006bc, 831 + 0xbd0006ce, 832 + 0x06ad7ef4, 833 + 0x07357e00, 834 + /* 0x08c1: ctx_xfer_exec */ 835 + 0x16019800, 836 + 0x008024bd, 837 + 0x02f60105, 838 + 0xb204bd00, 839 + 0xa5008e1f, 840 + 0x008f7e41, 841 + 0x01fcf000, 842 + 0xb6022cf0, 843 + 0xf2fd0124, 844 + 0x8effb205, 845 + 0x7e41a504, 846 + 0x7e00008f, 847 + 0xbd000216, 848 + 0x47fc8024, 849 + 0x0002f602, 850 + 0x2cf004bd, 851 + 0x0320b601, 852 + 0x024afc80, 853 + 0xbd0002f6, 854 + 0x01acf004, 855 + 0x0b06a5f0, 856 + 0x000c9800, 857 + 0x0e010d98, 858 + 0x013d7e00, 859 + 0x7e080a00, 860 + 0x7e0000ec, 861 + 0xf400020a, 862 + 0x0c0a1201, 863 + 0x0000b87e, 864 + 0x1d7e050f, 865 + 0x02f40007, 866 + /* 0x093d: ctx_xfer_post */ 867 + 0x7e020f2d, 868 + 0xbd0006ad, 869 + 0x06fe7ef4, 870 + 0x02277e00, 871 + 0x06bc7e00, 872 + 0x7ef4bd00, 873 + 0xf40006ad, 874 + 0x01981011, 875 + 
0x0511fd40, 876 + 0x7e070bf4, 877 + /* 0x0967: ctx_xfer_no_post_mmio */ 878 + /* 0x0967: ctx_xfer_done */ 879 + 0xf8000833, 880 880 0x00000000, 881 881 0x00000000, 882 882 0x00000000,
+157 -154
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
··· 26 26 #include "fuc/os.h" 27 27 28 28 #include <core/client.h> 29 - #include <core/option.h> 30 29 #include <core/firmware.h> 31 - #include <subdev/secboot.h> 30 + #include <core/option.h> 31 + #include <subdev/acr.h> 32 32 #include <subdev/fb.h> 33 33 #include <subdev/mc.h> 34 34 #include <subdev/pmu.h> ··· 1636 1636 1637 1637 static void 1638 1638 gf100_gr_init_fw(struct nvkm_falcon *falcon, 1639 - struct gf100_gr_fuc *code, struct gf100_gr_fuc *data) 1639 + struct nvkm_blob *code, struct nvkm_blob *data) 1640 1640 { 1641 1641 nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0); 1642 1642 nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false); ··· 1690 1690 { 1691 1691 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1692 1692 struct nvkm_device *device = subdev->device; 1693 - struct nvkm_secboot *sb = device->secboot; 1694 - u32 secboot_mask = 0; 1693 + u32 lsf_mask = 0; 1695 1694 int ret; 1696 1695 1697 1696 /* load fuc microcode */ 1698 1697 nvkm_mc_unk260(device, 0); 1699 1698 1700 1699 /* securely-managed falcons must be reset using secure boot */ 1701 - if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) 1702 - secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS); 1703 - else 1704 - gf100_gr_init_fw(gr->fecs.falcon, &gr->fuc409c, &gr->fuc409d); 1705 1700 1706 - if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) 1707 - secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS); 1708 - else 1709 - gf100_gr_init_fw(gr->gpccs.falcon, &gr->fuc41ac, &gr->fuc41ad); 1701 + if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) { 1702 + gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst, 1703 + &gr->fecs.data); 1704 + } else { 1705 + lsf_mask |= BIT(NVKM_ACR_LSF_FECS); 1706 + } 1710 1707 1711 - if (secboot_mask != 0) { 1712 - int ret = nvkm_secboot_reset(sb, secboot_mask); 1708 + if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) { 1709 + gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst, 1710 + &gr->gpccs.data); 1711 + } 
else { 1712 + lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS); 1713 + } 1714 + 1715 + if (lsf_mask) { 1716 + ret = nvkm_acr_bootstrap_falcons(device, lsf_mask); 1713 1717 if (ret) 1714 1718 return ret; 1715 1719 } ··· 1725 1721 nvkm_wr32(device, 0x41a10c, 0x00000000); 1726 1722 nvkm_wr32(device, 0x40910c, 0x00000000); 1727 1723 1728 - nvkm_falcon_start(gr->gpccs.falcon); 1729 - nvkm_falcon_start(gr->fecs.falcon); 1724 + nvkm_falcon_start(&gr->gpccs.falcon); 1725 + nvkm_falcon_start(&gr->fecs.falcon); 1730 1726 1731 1727 if (nvkm_msec(device, 2000, 1732 1728 if (nvkm_rd32(device, 0x409800) & 0x00000001) ··· 1788 1784 1789 1785 /* load HUB microcode */ 1790 1786 nvkm_mc_unk260(device, 0); 1791 - nvkm_falcon_load_dmem(gr->fecs.falcon, 1787 + nvkm_falcon_load_dmem(&gr->fecs.falcon, 1792 1788 gr->func->fecs.ucode->data.data, 0x0, 1793 1789 gr->func->fecs.ucode->data.size, 0); 1794 - nvkm_falcon_load_imem(gr->fecs.falcon, 1790 + nvkm_falcon_load_imem(&gr->fecs.falcon, 1795 1791 gr->func->fecs.ucode->code.data, 0x0, 1796 1792 gr->func->fecs.ucode->code.size, 0, 0, false); 1797 1793 1798 1794 /* load GPC microcode */ 1799 - nvkm_falcon_load_dmem(gr->gpccs.falcon, 1795 + nvkm_falcon_load_dmem(&gr->gpccs.falcon, 1800 1796 gr->func->gpccs.ucode->data.data, 0x0, 1801 1797 gr->func->gpccs.ucode->data.size, 0); 1802 - nvkm_falcon_load_imem(gr->gpccs.falcon, 1798 + nvkm_falcon_load_imem(&gr->gpccs.falcon, 1803 1799 gr->func->gpccs.ucode->code.data, 0x0, 1804 1800 gr->func->gpccs.ucode->code.size, 0, 0, false); 1805 1801 nvkm_mc_unk260(device, 1); ··· 1945 1941 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1946 1942 struct nvkm_device *device = subdev->device; 1947 1943 int i, j; 1948 - int ret; 1949 - 1950 - ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs.falcon); 1951 - if (ret) 1952 - return ret; 1953 - 1954 - mutex_init(&gr->fecs.mutex); 1955 - 1956 - ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs.falcon); 1957 - if (ret) 1958 - return ret; 1959 1944 
1960 1945 nvkm_pmu_pgob(device->pmu, false); 1961 1946 ··· 1985 1992 1986 1993 nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); 1987 1994 1988 - ret = nvkm_falcon_get(gr->fecs.falcon, subdev); 1995 + ret = nvkm_falcon_get(&gr->fecs.falcon, subdev); 1989 1996 if (ret) 1990 1997 return ret; 1991 1998 1992 - ret = nvkm_falcon_get(gr->gpccs.falcon, subdev); 1999 + ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev); 1993 2000 if (ret) 1994 2001 return ret; 1995 2002 ··· 1997 2004 } 1998 2005 1999 2006 static int 2000 - gf100_gr_fini_(struct nvkm_gr *base, bool suspend) 2007 + gf100_gr_fini(struct nvkm_gr *base, bool suspend) 2001 2008 { 2002 2009 struct gf100_gr *gr = gf100_gr(base); 2003 2010 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 2004 - nvkm_falcon_put(gr->gpccs.falcon, subdev); 2005 - nvkm_falcon_put(gr->fecs.falcon, subdev); 2011 + nvkm_falcon_put(&gr->gpccs.falcon, subdev); 2012 + nvkm_falcon_put(&gr->fecs.falcon, subdev); 2006 2013 return 0; 2007 - } 2008 - 2009 - void 2010 - gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc) 2011 - { 2012 - kfree(fuc->data); 2013 - fuc->data = NULL; 2014 - } 2015 - 2016 - static void 2017 - gf100_gr_dtor_init(struct gf100_gr_pack *pack) 2018 - { 2019 - vfree(pack); 2020 2014 } 2021 2015 2022 2016 void * ··· 2011 2031 { 2012 2032 struct gf100_gr *gr = gf100_gr(base); 2013 2033 2014 - if (gr->func->dtor) 2015 - gr->func->dtor(gr); 2016 2034 kfree(gr->data); 2017 2035 2018 - nvkm_falcon_del(&gr->gpccs.falcon); 2019 - nvkm_falcon_del(&gr->fecs.falcon); 2036 + nvkm_falcon_dtor(&gr->gpccs.falcon); 2037 + nvkm_falcon_dtor(&gr->fecs.falcon); 2020 2038 2021 - gf100_gr_dtor_fw(&gr->fuc409c); 2022 - gf100_gr_dtor_fw(&gr->fuc409d); 2023 - gf100_gr_dtor_fw(&gr->fuc41ac); 2024 - gf100_gr_dtor_fw(&gr->fuc41ad); 2039 + nvkm_blob_dtor(&gr->fecs.inst); 2040 + nvkm_blob_dtor(&gr->fecs.data); 2041 + nvkm_blob_dtor(&gr->gpccs.inst); 2042 + nvkm_blob_dtor(&gr->gpccs.data); 2025 2043 2026 - gf100_gr_dtor_init(gr->fuc_bundle); 2027 - 
gf100_gr_dtor_init(gr->fuc_method); 2028 - gf100_gr_dtor_init(gr->fuc_sw_ctx); 2029 - gf100_gr_dtor_init(gr->fuc_sw_nonctx); 2044 + vfree(gr->bundle); 2045 + vfree(gr->method); 2046 + vfree(gr->sw_ctx); 2047 + vfree(gr->sw_nonctx); 2030 2048 2031 2049 return gr; 2032 2050 } ··· 2034 2056 .dtor = gf100_gr_dtor, 2035 2057 .oneinit = gf100_gr_oneinit, 2036 2058 .init = gf100_gr_init_, 2037 - .fini = gf100_gr_fini_, 2059 + .fini = gf100_gr_fini, 2038 2060 .intr = gf100_gr_intr, 2039 2061 .units = gf100_gr_units, 2040 2062 .chan_new = gf100_gr_chan_new, ··· 2045 2067 .ctxsw.inst = gf100_gr_ctxsw_inst, 2046 2068 }; 2047 2069 2048 - int 2049 - gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname, 2050 - struct gf100_gr_fuc *fuc, int ret) 2051 - { 2052 - struct nvkm_subdev *subdev = &gr->base.engine.subdev; 2053 - struct nvkm_device *device = subdev->device; 2054 - const struct firmware *fw; 2055 - char f[32]; 2056 - 2057 - /* see if this firmware has a legacy path */ 2058 - if (!strcmp(fwname, "fecs_inst")) 2059 - fwname = "fuc409c"; 2060 - else if (!strcmp(fwname, "fecs_data")) 2061 - fwname = "fuc409d"; 2062 - else if (!strcmp(fwname, "gpccs_inst")) 2063 - fwname = "fuc41ac"; 2064 - else if (!strcmp(fwname, "gpccs_data")) 2065 - fwname = "fuc41ad"; 2066 - else { 2067 - /* nope, let's just return the error we got */ 2068 - nvkm_error(subdev, "failed to load %s\n", fwname); 2069 - return ret; 2070 - } 2071 - 2072 - /* yes, try to load from the legacy path */ 2073 - nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname); 2074 - 2075 - snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname); 2076 - ret = request_firmware(&fw, f, device->dev); 2077 - if (ret) { 2078 - snprintf(f, sizeof(f), "nouveau/%s", fwname); 2079 - ret = request_firmware(&fw, f, device->dev); 2080 - if (ret) { 2081 - nvkm_error(subdev, "failed to load %s\n", fwname); 2082 - return ret; 2083 - } 2084 - } 2085 - 2086 - fuc->size = fw->size; 2087 - fuc->data = 
kmemdup(fw->data, fuc->size, GFP_KERNEL); 2088 - release_firmware(fw); 2089 - return (fuc->data != NULL) ? 0 : -ENOMEM; 2090 - } 2070 + static const struct nvkm_falcon_func 2071 + gf100_gr_flcn = { 2072 + .fbif = 0x600, 2073 + .load_imem = nvkm_falcon_v1_load_imem, 2074 + .load_dmem = nvkm_falcon_v1_load_dmem, 2075 + .read_dmem = nvkm_falcon_v1_read_dmem, 2076 + .bind_context = nvkm_falcon_v1_bind_context, 2077 + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 2078 + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 2079 + .set_start_addr = nvkm_falcon_v1_set_start_addr, 2080 + .start = nvkm_falcon_v1_start, 2081 + .enable = nvkm_falcon_v1_enable, 2082 + .disable = nvkm_falcon_v1_disable, 2083 + }; 2091 2084 2092 2085 int 2093 - gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname, 2094 - struct gf100_gr_fuc *fuc) 2095 - { 2096 - const struct firmware *fw; 2097 - int ret; 2098 - 2099 - ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw); 2100 - if (ret) { 2101 - ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret); 2102 - if (ret) 2103 - return -ENODEV; 2104 - return 0; 2105 - } 2106 - 2107 - fuc->size = fw->size; 2108 - fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); 2109 - nvkm_firmware_put(fw); 2110 - return (fuc->data != NULL) ? 
0 : -ENOMEM; 2111 - } 2112 - 2113 - int 2114 - gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device, 2115 - int index, struct gf100_gr *gr) 2116 - { 2117 - gr->func = func; 2118 - gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW", 2119 - func->fecs.ucode == NULL); 2120 - 2121 - return nvkm_gr_ctor(&gf100_gr_, device, index, 2122 - gr->firmware || func->fecs.ucode != NULL, 2123 - &gr->base); 2124 - } 2125 - 2126 - int 2127 - gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, 2128 - int index, struct nvkm_gr **pgr) 2086 + gf100_gr_new_(const struct gf100_gr_fwif *fwif, 2087 + struct nvkm_device *device, int index, struct nvkm_gr **pgr) 2129 2088 { 2130 2089 struct gf100_gr *gr; 2131 2090 int ret; ··· 2071 2156 return -ENOMEM; 2072 2157 *pgr = &gr->base; 2073 2158 2074 - ret = gf100_gr_ctor(func, device, index, gr); 2159 + ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base); 2075 2160 if (ret) 2076 2161 return ret; 2077 2162 2078 - if (gr->firmware) { 2079 - if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || 2080 - gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || 2081 - gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || 2082 - gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) 2083 - return -ENODEV; 2084 - } 2163 + fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr); 2164 + if (IS_ERR(fwif)) 2165 + return -ENODEV; 2166 + 2167 + gr->func = fwif->func; 2168 + 2169 + ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, 2170 + "fecs", 0x409000, &gr->fecs.falcon); 2171 + if (ret) 2172 + return ret; 2173 + 2174 + mutex_init(&gr->fecs.mutex); 2175 + 2176 + ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, 2177 + "gpccs", 0x41a000, &gr->gpccs.falcon); 2178 + if (ret) 2179 + return ret; 2085 2180 2086 2181 return 0; 2182 + } 2183 + 2184 + void 2185 + gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds) 2186 + { 2187 + struct nvkm_device *device = 
gr->base.engine.subdev.device; 2188 + int gpc, i, j; 2189 + u32 data; 2190 + 2191 + for (gpc = 0, i = 0; i < 4; i++) { 2192 + for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++) 2193 + data |= gr->tpc_nr[gpc] << (j * 4); 2194 + if (pd) 2195 + nvkm_wr32(device, 0x406028 + (i * 4), data); 2196 + if (ds) 2197 + nvkm_wr32(device, 0x405870 + (i * 4), data); 2198 + } 2087 2199 } 2088 2200 2089 2201 void ··· 2237 2295 2238 2296 gr->func->init_gpc_mmu(gr); 2239 2297 2240 - if (gr->fuc_sw_nonctx) 2241 - gf100_gr_mmio(gr, gr->fuc_sw_nonctx); 2298 + if (gr->sw_nonctx) 2299 + gf100_gr_mmio(gr, gr->sw_nonctx); 2242 2300 else 2243 2301 gf100_gr_mmio(gr, gr->func->mmio); 2244 2302 ··· 2262 2320 gr->func->init_bios_2(gr); 2263 2321 if (gr->func->init_swdx_pes_mask) 2264 2322 gr->func->init_swdx_pes_mask(gr); 2323 + if (gr->func->init_fs) 2324 + gr->func->init_fs(gr); 2265 2325 2266 2326 nvkm_wr32(device, 0x400500, 0x00010001); 2267 2327 ··· 2282 2338 if (gr->func->init_40601c) 2283 2339 gr->func->init_40601c(gr); 2284 2340 2285 - nvkm_wr32(device, 0x404490, 0xc0000000); 2286 2341 nvkm_wr32(device, 0x406018, 0xc0000000); 2342 + nvkm_wr32(device, 0x404490, 0xc0000000); 2287 2343 2288 2344 if (gr->func->init_sked_hww_esr) 2289 2345 gr->func->init_sked_hww_esr(gr); ··· 2398 2454 }; 2399 2455 2400 2456 int 2457 + gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) 2458 + { 2459 + gr->firmware = false; 2460 + return 0; 2461 + } 2462 + 2463 + static int 2464 + gf100_gr_load_fw(struct gf100_gr *gr, const char *name, 2465 + struct nvkm_blob *blob) 2466 + { 2467 + struct nvkm_subdev *subdev = &gr->base.engine.subdev; 2468 + struct nvkm_device *device = subdev->device; 2469 + const struct firmware *fw; 2470 + char f[32]; 2471 + int ret; 2472 + 2473 + snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name); 2474 + ret = request_firmware(&fw, f, device->dev); 2475 + if (ret) { 2476 + snprintf(f, sizeof(f), "nouveau/%s", name); 2477 + ret = 
request_firmware(&fw, f, device->dev); 2478 + if (ret) { 2479 + nvkm_error(subdev, "failed to load %s\n", name); 2480 + return ret; 2481 + } 2482 + } 2483 + 2484 + blob->size = fw->size; 2485 + blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL); 2486 + release_firmware(fw); 2487 + return (blob->data != NULL) ? 0 : -ENOMEM; 2488 + } 2489 + 2490 + int 2491 + gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) 2492 + { 2493 + struct nvkm_device *device = gr->base.engine.subdev.device; 2494 + 2495 + if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false)) 2496 + return -EINVAL; 2497 + 2498 + if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) || 2499 + gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) || 2500 + gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) || 2501 + gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data)) 2502 + return -ENOENT; 2503 + 2504 + gr->firmware = true; 2505 + return 0; 2506 + } 2507 + 2508 + static const struct gf100_gr_fwif 2509 + gf100_gr_fwif[] = { 2510 + { -1, gf100_gr_load, &gf100_gr }, 2511 + { -1, gf100_gr_nofw, &gf100_gr }, 2512 + {} 2513 + }; 2514 + 2515 + int 2401 2516 gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 2402 2517 { 2403 - return gf100_gr_new_(&gf100_gr, device, index, pgr); 2518 + return gf100_gr_new_(gf100_gr_fwif, device, index, pgr); 2404 2519 }
+51 -38
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
··· 31 31 #include <subdev/mmu.h> 32 32 #include <engine/falcon.h> 33 33 34 + struct nvkm_acr_lsfw; 35 + 34 36 #define GPC_MAX 32 35 37 #define TPC_MAX_PER_GPC 8 36 38 #define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC) ··· 55 53 u32 data; 56 54 u32 shift; 57 55 int buffer; 58 - }; 59 - 60 - struct gf100_gr_fuc { 61 - u32 *data; 62 - u32 size; 63 56 }; 64 57 65 58 struct gf100_gr_zbc_color { ··· 80 83 struct nvkm_gr base; 81 84 82 85 struct { 83 - struct nvkm_falcon *falcon; 86 + struct nvkm_falcon falcon; 87 + struct nvkm_blob inst; 88 + struct nvkm_blob data; 89 + 84 90 struct mutex mutex; 85 91 u32 disable; 86 92 } fecs; 87 93 88 94 struct { 89 - struct nvkm_falcon *falcon; 95 + struct nvkm_falcon falcon; 96 + struct nvkm_blob inst; 97 + struct nvkm_blob data; 90 98 } gpccs; 91 99 92 - struct gf100_gr_fuc fuc409c; 93 - struct gf100_gr_fuc fuc409d; 94 - struct gf100_gr_fuc fuc41ac; 95 - struct gf100_gr_fuc fuc41ad; 96 100 bool firmware; 97 101 98 102 /* 99 103 * Used if the register packs are loaded from NVIDIA fw instead of 100 104 * using hardcoded arrays. To be allocated with vzalloc(). 
101 105 */ 102 - struct gf100_gr_pack *fuc_sw_nonctx; 103 - struct gf100_gr_pack *fuc_sw_ctx; 104 - struct gf100_gr_pack *fuc_bundle; 105 - struct gf100_gr_pack *fuc_method; 106 + struct gf100_gr_pack *sw_nonctx; 107 + struct gf100_gr_pack *sw_ctx; 108 + struct gf100_gr_pack *bundle; 109 + struct gf100_gr_pack *method; 106 110 107 111 struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT]; 108 112 struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT]; ··· 138 140 u32 size_pm; 139 141 }; 140 142 141 - int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *, 142 - int, struct gf100_gr *); 143 - int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, 144 - int, struct nvkm_gr **); 145 - void *gf100_gr_dtor(struct nvkm_gr *); 146 - 147 143 int gf100_gr_fecs_bind_pointer(struct gf100_gr *, u32 inst); 148 144 149 145 struct gf100_gr_func_zbc { ··· 149 157 }; 150 158 151 159 struct gf100_gr_func { 152 - void (*dtor)(struct gf100_gr *); 153 160 void (*oneinit_tiles)(struct gf100_gr *); 154 161 void (*oneinit_sm_id)(struct gf100_gr *); 155 162 int (*init)(struct gf100_gr *); ··· 162 171 void (*init_rop_active_fbps)(struct gf100_gr *); 163 172 void (*init_bios_2)(struct gf100_gr *); 164 173 void (*init_swdx_pes_mask)(struct gf100_gr *); 174 + void (*init_fs)(struct gf100_gr *); 165 175 void (*init_fecs_exceptions)(struct gf100_gr *); 166 176 void (*init_ds_hww_esr_2)(struct gf100_gr *); 167 177 void (*init_40601c)(struct gf100_gr *); ··· 209 217 void gf100_gr_init_tex_hww_esr(struct gf100_gr *, int, int); 210 218 void gf100_gr_init_shader_exceptions(struct gf100_gr *, int, int); 211 219 void gf100_gr_init_400054(struct gf100_gr *); 220 + void gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *, bool, bool); 212 221 extern const struct gf100_gr_func_zbc gf100_gr_zbc; 213 222 214 223 void gf117_gr_init_zcull(struct gf100_gr *); ··· 242 249 void gp102_gr_init_swdx_pes_mask(struct gf100_gr *); 243 250 extern const struct gf100_gr_func_zbc 
gp102_gr_zbc; 244 251 252 + extern const struct gf100_gr_func gp107_gr; 253 + 254 + void gv100_gr_init_419bd8(struct gf100_gr *); 255 + void gv100_gr_init_504430(struct gf100_gr *, int, int); 256 + void gv100_gr_init_shader_exceptions(struct gf100_gr *, int, int); 257 + void gv100_gr_trap_mp(struct gf100_gr *, int, int); 258 + 245 259 #define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object) 246 260 #include <core/object.h> 247 261 ··· 269 269 270 270 void gf100_gr_ctxctl_debug(struct gf100_gr *); 271 271 272 - void gf100_gr_dtor_fw(struct gf100_gr_fuc *); 273 - int gf100_gr_ctor_fw(struct gf100_gr *, const char *, 274 - struct gf100_gr_fuc *); 275 272 u64 gf100_gr_units(struct nvkm_gr *); 276 273 void gf100_gr_zbc_init(struct gf100_gr *); 277 274 ··· 291 294 for (init = pack->init; init && init->count; init++) 292 295 293 296 struct gf100_gr_ucode { 294 - struct gf100_gr_fuc code; 295 - struct gf100_gr_fuc data; 297 + struct nvkm_blob code; 298 + struct nvkm_blob data; 296 299 }; 297 300 298 301 extern struct gf100_gr_ucode gf100_gr_fecs_ucode; ··· 306 309 void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *); 307 310 void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *); 308 311 int gf100_gr_init_ctxctl(struct gf100_gr *); 309 - 310 - /* external bundles loading functions */ 311 - int gk20a_gr_av_to_init(struct gf100_gr *, const char *, 312 - struct gf100_gr_pack **); 313 - int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *, 314 - struct gf100_gr_pack **); 315 - int gk20a_gr_av_to_method(struct gf100_gr *, const char *, 316 - struct gf100_gr_pack **); 317 - 318 - int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int, 319 - struct nvkm_gr **); 320 312 321 313 /* register init value lists */ 322 314 ··· 389 403 void gm107_gr_init_bios(struct gf100_gr *); 390 404 391 405 void gm200_gr_init_gpc_mmu(struct gf100_gr *); 406 + 407 + struct gf100_gr_fwif { 408 + int version; 409 + int (*load)(struct 
gf100_gr *, int ver, const struct gf100_gr_fwif *); 410 + const struct gf100_gr_func *func; 411 + const struct nvkm_acr_lsf_func *fecs; 412 + const struct nvkm_acr_lsf_func *gpccs; 413 + }; 414 + 415 + int gf100_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *); 416 + int gf100_gr_nofw(struct gf100_gr *, int, const struct gf100_gr_fwif *); 417 + 418 + int gk20a_gr_load_sw(struct gf100_gr *, const char *path, int ver); 419 + 420 + int gm200_gr_load(struct gf100_gr *, int, const struct gf100_gr_fwif *); 421 + extern const struct nvkm_acr_lsf_func gm200_gr_gpccs_acr; 422 + extern const struct nvkm_acr_lsf_func gm200_gr_fecs_acr; 423 + 424 + extern const struct nvkm_acr_lsf_func gm20b_gr_fecs_acr; 425 + void gm20b_gr_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); 426 + void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64); 427 + 428 + extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr; 429 + extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr; 430 + 431 + int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int, 432 + struct nvkm_gr **); 392 433 #endif
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
··· 144 144 } 145 145 }; 146 146 147 + static const struct gf100_gr_fwif 148 + gf104_gr_fwif[] = { 149 + { -1, gf100_gr_load, &gf104_gr }, 150 + { -1, gf100_gr_nofw, &gf104_gr }, 151 + {} 152 + }; 153 + 147 154 int 148 155 gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 149 156 { 150 - return gf100_gr_new_(&gf104_gr, device, index, pgr); 157 + return gf100_gr_new_(gf104_gr_fwif, device, index, pgr); 151 158 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
··· 143 143 } 144 144 }; 145 145 146 + const struct gf100_gr_fwif 147 + gf108_gr_fwif[] = { 148 + { -1, gf100_gr_load, &gf108_gr }, 149 + { -1, gf100_gr_nofw, &gf108_gr }, 150 + {} 151 + }; 152 + 146 153 int 147 154 gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 148 155 { 149 - return gf100_gr_new_(&gf108_gr, device, index, pgr); 156 + return gf100_gr_new_(gf108_gr_fwif, device, index, pgr); 150 157 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
··· 119 119 } 120 120 }; 121 121 122 + static const struct gf100_gr_fwif 123 + gf110_gr_fwif[] = { 124 + { -1, gf100_gr_load, &gf110_gr }, 125 + { -1, gf100_gr_nofw, &gf110_gr }, 126 + {} 127 + }; 128 + 122 129 int 123 130 gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 124 131 { 125 - return gf100_gr_new_(&gf110_gr, device, index, pgr); 132 + return gf100_gr_new_(gf110_gr_fwif, device, index, pgr); 126 133 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
··· 184 184 } 185 185 }; 186 186 187 + static const struct gf100_gr_fwif 188 + gf117_gr_fwif[] = { 189 + { -1, gf100_gr_load, &gf117_gr }, 190 + { -1, gf100_gr_nofw, &gf117_gr }, 191 + {} 192 + }; 193 + 187 194 int 188 195 gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 189 196 { 190 - return gf100_gr_new_(&gf117_gr, device, index, pgr); 197 + return gf100_gr_new_(gf117_gr_fwif, device, index, pgr); 191 198 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
··· 210 210 } 211 211 }; 212 212 213 + static const struct gf100_gr_fwif 214 + gf119_gr_fwif[] = { 215 + { -1, gf100_gr_load, &gf119_gr }, 216 + { -1, gf100_gr_nofw, &gf119_gr }, 217 + {} 218 + }; 219 + 213 220 int 214 221 gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 215 222 { 216 - return gf100_gr_new_(&gf119_gr, device, index, pgr); 223 + return gf100_gr_new_(gf119_gr_fwif, device, index, pgr); 217 224 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
··· 489 489 } 490 490 }; 491 491 492 + static const struct gf100_gr_fwif 493 + gk104_gr_fwif[] = { 494 + { -1, gf100_gr_load, &gk104_gr }, 495 + { -1, gf100_gr_nofw, &gk104_gr }, 496 + {} 497 + }; 498 + 492 499 int 493 500 gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 494 501 { 495 - return gf100_gr_new_(&gk104_gr, device, index, pgr); 502 + return gf100_gr_new_(gk104_gr_fwif, device, index, pgr); 496 503 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
··· 385 385 } 386 386 }; 387 387 388 + static const struct gf100_gr_fwif 389 + gk110_gr_fwif[] = { 390 + { -1, gf100_gr_load, &gk110_gr }, 391 + { -1, gf100_gr_nofw, &gk110_gr }, 392 + {} 393 + }; 394 + 388 395 int 389 396 gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 390 397 { 391 - return gf100_gr_new_(&gk110_gr, device, index, pgr); 398 + return gf100_gr_new_(gk110_gr_fwif, device, index, pgr); 392 399 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
··· 136 136 } 137 137 }; 138 138 139 + static const struct gf100_gr_fwif 140 + gk110b_gr_fwif[] = { 141 + { -1, gf100_gr_load, &gk110b_gr }, 142 + { -1, gf100_gr_nofw, &gk110b_gr }, 143 + {} 144 + }; 145 + 139 146 int 140 147 gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 141 148 { 142 - return gf100_gr_new_(&gk110b_gr, device, index, pgr); 149 + return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr); 143 150 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
··· 194 194 } 195 195 }; 196 196 197 + static const struct gf100_gr_fwif 198 + gk208_gr_fwif[] = { 199 + { -1, gf100_gr_load, &gk208_gr }, 200 + { -1, gf100_gr_nofw, &gk208_gr }, 201 + {} 202 + }; 203 + 197 204 int 198 205 gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 199 206 { 200 - return gf100_gr_new_(&gk208_gr, device, index, pgr); 207 + return gf100_gr_new_(gk208_gr_fwif, device, index, pgr); 201 208 }
+75 -65
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
··· 22 22 #include "gf100.h" 23 23 #include "ctxgf100.h" 24 24 25 + #include <core/firmware.h> 25 26 #include <subdev/timer.h> 26 27 27 28 #include <nvif/class.h> ··· 34 33 }; 35 34 36 35 int 37 - gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name, 38 - struct gf100_gr_pack **ppack) 36 + gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name, 37 + int ver, struct gf100_gr_pack **ppack) 39 38 { 40 - struct gf100_gr_fuc fuc; 39 + struct nvkm_subdev *subdev = &gr->base.engine.subdev; 40 + struct nvkm_blob blob; 41 41 struct gf100_gr_init *init; 42 42 struct gf100_gr_pack *pack; 43 43 int nent; 44 44 int ret; 45 45 int i; 46 46 47 - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); 47 + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); 48 48 if (ret) 49 49 return ret; 50 50 51 - nent = (fuc.size / sizeof(struct gk20a_fw_av)); 51 + nent = (blob.size / sizeof(struct gk20a_fw_av)); 52 52 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); 53 53 if (!pack) { 54 54 ret = -ENOMEM; ··· 61 59 62 60 for (i = 0; i < nent; i++) { 63 61 struct gf100_gr_init *ent = &init[i]; 64 - struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i]; 62 + struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i]; 65 63 66 64 ent->addr = av->addr; 67 65 ent->data = av->data; ··· 72 70 *ppack = pack; 73 71 74 72 end: 75 - gf100_gr_dtor_fw(&fuc); 73 + nvkm_blob_dtor(&blob); 76 74 return ret; 77 75 } 78 76 ··· 84 82 }; 85 83 86 84 int 87 - gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name, 88 - struct gf100_gr_pack **ppack) 85 + gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name, 86 + int ver, struct gf100_gr_pack **ppack) 89 87 { 90 - struct gf100_gr_fuc fuc; 88 + struct nvkm_subdev *subdev = &gr->base.engine.subdev; 89 + struct nvkm_blob blob; 91 90 struct gf100_gr_init *init; 92 91 struct gf100_gr_pack *pack; 93 92 int nent; 94 93 int ret; 95 94 int i; 96 95 97 - ret = gf100_gr_ctor_fw(gr, 
fw_name, &fuc); 96 + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); 98 97 if (ret) 99 98 return ret; 100 99 101 - nent = (fuc.size / sizeof(struct gk20a_fw_aiv)); 100 + nent = (blob.size / sizeof(struct gk20a_fw_aiv)); 102 101 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); 103 102 if (!pack) { 104 103 ret = -ENOMEM; ··· 111 108 112 109 for (i = 0; i < nent; i++) { 113 110 struct gf100_gr_init *ent = &init[i]; 114 - struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i]; 111 + struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i]; 115 112 116 113 ent->addr = av->addr; 117 114 ent->data = av->data; ··· 122 119 *ppack = pack; 123 120 124 121 end: 125 - gf100_gr_dtor_fw(&fuc); 122 + nvkm_blob_dtor(&blob); 126 123 return ret; 127 124 } 128 125 129 126 int 130 - gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name, 131 - struct gf100_gr_pack **ppack) 127 + gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name, 128 + int ver, struct gf100_gr_pack **ppack) 132 129 { 133 - struct gf100_gr_fuc fuc; 130 + struct nvkm_subdev *subdev = &gr->base.engine.subdev; 131 + struct nvkm_blob blob; 134 132 struct gf100_gr_init *init; 135 133 struct gf100_gr_pack *pack; 136 134 /* We don't suppose we will initialize more than 16 classes here... 
*/ ··· 141 137 int ret; 142 138 int i; 143 139 144 - ret = gf100_gr_ctor_fw(gr, fw_name, &fuc); 140 + ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob); 145 141 if (ret) 146 142 return ret; 147 143 148 - nent = (fuc.size / sizeof(struct gk20a_fw_av)); 144 + nent = (blob.size / sizeof(struct gk20a_fw_av)); 149 145 150 - pack = vzalloc((sizeof(*pack) * max_classes) + 151 - (sizeof(*init) * (nent + 1))); 146 + pack = vzalloc((sizeof(*pack) * (max_classes + 1)) + 147 + (sizeof(*init) * (nent + max_classes + 1))); 152 148 if (!pack) { 153 149 ret = -ENOMEM; 154 150 goto end; 155 151 } 156 152 157 - init = (void *)(pack + max_classes); 153 + init = (void *)(pack + max_classes + 1); 158 154 159 - for (i = 0; i < nent; i++) { 160 - struct gf100_gr_init *ent = &init[i]; 161 - struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i]; 155 + for (i = 0; i < nent; i++, init++) { 156 + struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i]; 162 157 u32 class = av->addr & 0xffff; 163 158 u32 addr = (av->addr & 0xffff0000) >> 14; 164 159 165 160 if (prevclass != class) { 166 - pack[classidx].init = ent; 161 + if (prevclass) /* Add terminator to the method list. 
*/ 162 + init++; 163 + pack[classidx].init = init; 167 164 pack[classidx].type = class; 168 165 prevclass = class; 169 166 if (++classidx >= max_classes) { ··· 174 169 } 175 170 } 176 171 177 - ent->addr = addr; 178 - ent->data = av->data; 179 - ent->count = 1; 180 - ent->pitch = 1; 172 + init->addr = addr; 173 + init->data = av->data; 174 + init->count = 1; 175 + init->pitch = 1; 181 176 } 182 177 183 178 *ppack = pack; 184 179 185 180 end: 186 - gf100_gr_dtor_fw(&fuc); 181 + nvkm_blob_dtor(&blob); 187 182 return ret; 188 183 } 189 184 ··· 229 224 /* Clear SCC RAM */ 230 225 nvkm_wr32(device, 0x40802c, 0x1); 231 226 232 - gf100_gr_mmio(gr, gr->fuc_sw_nonctx); 227 + gf100_gr_mmio(gr, gr->sw_nonctx); 233 228 234 229 ret = gk20a_gr_wait_mem_scrubbing(gr); 235 230 if (ret) ··· 308 303 }; 309 304 310 305 int 311 - gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 306 + gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver) 312 307 { 313 - struct gf100_gr *gr; 314 - int ret; 315 - 316 - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) 317 - return -ENOMEM; 318 - *pgr = &gr->base; 319 - 320 - ret = gf100_gr_ctor(&gk20a_gr, device, index, gr); 321 - if (ret) 322 - return ret; 323 - 324 - if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) || 325 - gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) || 326 - gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) || 327 - gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad)) 328 - return -ENODEV; 329 - 330 - ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx); 331 - if (ret) 332 - return ret; 333 - 334 - ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx); 335 - if (ret) 336 - return ret; 337 - 338 - ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle); 339 - if (ret) 340 - return ret; 341 - 342 - ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method); 343 - if (ret) 344 - return ret; 308 + if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) || 309 + 
gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) || 310 + gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) || 311 + gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method)) 312 + return -ENOENT; 345 313 346 314 return 0; 315 + } 316 + 317 + static int 318 + gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) 319 + { 320 + struct nvkm_subdev *subdev = &gr->base.engine.subdev; 321 + 322 + if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver, 323 + &gr->fecs.inst) || 324 + nvkm_firmware_load_blob(subdev, "", "fecs_data", ver, 325 + &gr->fecs.data) || 326 + nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver, 327 + &gr->gpccs.inst) || 328 + nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver, 329 + &gr->gpccs.data)) 330 + return -ENOENT; 331 + 332 + gr->firmware = true; 333 + 334 + return gk20a_gr_load_sw(gr, "", ver); 335 + } 336 + 337 + static const struct gf100_gr_fwif 338 + gk20a_gr_fwif[] = { 339 + { -1, gk20a_gr_load, &gk20a_gr }, 340 + {} 341 + }; 342 + 343 + int 344 + gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 345 + { 346 + return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr); 347 347 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
··· 429 429 } 430 430 }; 431 431 432 + static const struct gf100_gr_fwif 433 + gm107_gr_fwif[] = { 434 + { -1, gf100_gr_load, &gm107_gr }, 435 + { -1, gf100_gr_nofw, &gm107_gr }, 436 + {} 437 + }; 438 + 432 439 int 433 440 gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 434 441 { 435 - return gf100_gr_new_(&gm107_gr, device, index, pgr); 442 + return gf100_gr_new_(gm107_gr_fwif, device, index, pgr); 436 443 }
+121 -39
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
··· 24 24 #include "gf100.h" 25 25 #include "ctxgf100.h" 26 26 27 + #include <core/firmware.h> 28 + #include <subdev/acr.h> 27 29 #include <subdev/secboot.h> 30 + 31 + #include <nvfw/flcn.h> 28 32 29 33 #include <nvif/class.h> 30 34 31 35 /******************************************************************************* 32 36 * PGRAPH engine/subdev functions 33 37 ******************************************************************************/ 38 + 39 + static void 40 + gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) 41 + { 42 + struct flcn_bl_dmem_desc_v1 hdr; 43 + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); 44 + hdr.code_dma_base = hdr.code_dma_base + adjust; 45 + hdr.data_dma_base = hdr.data_dma_base + adjust; 46 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 47 + flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr); 48 + } 49 + 50 + static void 51 + gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, 52 + struct nvkm_acr_lsfw *lsfw) 53 + { 54 + const u64 base = lsfw->offset.img + lsfw->app_start_offset; 55 + const u64 code = base + lsfw->app_resident_code_offset; 56 + const u64 data = base + lsfw->app_resident_data_offset; 57 + const struct flcn_bl_dmem_desc_v1 hdr = { 58 + .ctx_dma = FALCON_DMAIDX_UCODE, 59 + .code_dma_base = code, 60 + .non_sec_code_off = lsfw->app_resident_code_offset, 61 + .non_sec_code_size = lsfw->app_resident_code_size, 62 + .code_entry_point = lsfw->app_imem_entry, 63 + .data_dma_base = data, 64 + .data_size = lsfw->app_resident_data_size, 65 + }; 66 + 67 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 68 + } 69 + 70 + const struct nvkm_acr_lsf_func 71 + gm200_gr_gpccs_acr = { 72 + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, 73 + .bld_size = sizeof(struct flcn_bl_dmem_desc_v1), 74 + .bld_write = gm200_gr_acr_bld_write, 75 + .bld_patch = gm200_gr_acr_bld_patch, 76 + }; 77 + 78 + const struct nvkm_acr_lsf_func 79 + gm200_gr_fecs_acr = { 80 + .bld_size = sizeof(struct flcn_bl_dmem_desc_v1), 81 + .bld_write = gm200_gr_acr_bld_write, 82 
+ .bld_patch = gm200_gr_acr_bld_patch, 83 + }; 34 84 35 85 int 36 86 gm200_gr_rops(struct gf100_gr *gr) ··· 174 124 } 175 125 } 176 126 177 - int 178 - gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device, 179 - int index, struct nvkm_gr **pgr) 180 - { 181 - struct gf100_gr *gr; 182 - int ret; 183 - 184 - if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) 185 - return -ENOMEM; 186 - *pgr = &gr->base; 187 - 188 - ret = gf100_gr_ctor(func, device, index, gr); 189 - if (ret) 190 - return ret; 191 - 192 - /* Load firmwares for non-secure falcons */ 193 - if (!nvkm_secboot_is_managed(device->secboot, 194 - NVKM_SECBOOT_FALCON_FECS)) { 195 - if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) || 196 - (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d))) 197 - return ret; 198 - } 199 - if (!nvkm_secboot_is_managed(device->secboot, 200 - NVKM_SECBOOT_FALCON_GPCCS)) { 201 - if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) || 202 - (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad))) 203 - return ret; 204 - } 205 - 206 - if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) || 207 - (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) || 208 - (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) || 209 - (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method))) 210 - return ret; 211 - 212 - return 0; 213 - } 214 - 215 127 static const struct gf100_gr_func 216 128 gm200_gr = { 217 129 .oneinit_tiles = gm200_gr_oneinit_tiles, ··· 210 198 }; 211 199 212 200 int 201 + gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) 202 + { 203 + int ret; 204 + 205 + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev, 206 + &gr->fecs.falcon, 207 + NVKM_ACR_LSF_FECS, 208 + "gr/fecs_", ver, fwif->fecs); 209 + if (ret) 210 + return ret; 211 + 212 + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev, 213 + &gr->gpccs.falcon, 
214 + NVKM_ACR_LSF_GPCCS, 215 + "gr/gpccs_", ver, 216 + fwif->gpccs); 217 + if (ret) 218 + return ret; 219 + 220 + gr->firmware = true; 221 + 222 + return gk20a_gr_load_sw(gr, "gr/", ver); 223 + } 224 + 225 + MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin"); 226 + MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin"); 227 + MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin"); 228 + MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin"); 229 + MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin"); 230 + MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin"); 231 + MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin"); 232 + MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin"); 233 + MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin"); 234 + MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin"); 235 + MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin"); 236 + MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin"); 237 + 238 + MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin"); 239 + MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin"); 240 + MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin"); 241 + MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin"); 242 + MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin"); 243 + MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin"); 244 + MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin"); 245 + MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin"); 246 + MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin"); 247 + MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin"); 248 + MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin"); 249 + MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin"); 250 + 251 + MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin"); 252 + MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin"); 253 + MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin"); 254 + MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin"); 255 + MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin"); 256 + MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin"); 257 + MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin"); 258 + 
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin"); 259 + MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); 260 + MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); 261 + MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); 262 + MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); 263 + 264 + static const struct gf100_gr_fwif 265 + gm200_gr_fwif[] = { 266 + { 0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, 267 + {} 268 + }; 269 + 270 + int 213 271 gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 214 272 { 215 - return gm200_gr_new_(&gm200_gr, device, index, pgr); 273 + return gf100_gr_new_(gm200_gr_fwif, device, index, pgr); 216 274 }
+96 -2
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
··· 22 22 #include "gf100.h" 23 23 #include "ctxgf100.h" 24 24 25 + #include <core/firmware.h> 26 + #include <subdev/acr.h> 25 27 #include <subdev/timer.h> 26 28 29 + #include <nvfw/flcn.h> 30 + 27 31 #include <nvif/class.h> 32 + 33 + void 34 + gm20b_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) 35 + { 36 + struct flcn_bl_dmem_desc hdr; 37 + u64 addr; 38 + 39 + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); 40 + addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); 41 + hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); 42 + hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); 43 + addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); 44 + hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); 45 + hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); 46 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 47 + 48 + flcn_bl_dmem_desc_dump(&acr->subdev, &hdr); 49 + } 50 + 51 + void 52 + gm20b_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, 53 + struct nvkm_acr_lsfw *lsfw) 54 + { 55 + const u64 base = lsfw->offset.img + lsfw->app_start_offset; 56 + const u64 code = (base + lsfw->app_resident_code_offset) >> 8; 57 + const u64 data = (base + lsfw->app_resident_data_offset) >> 8; 58 + const struct flcn_bl_dmem_desc hdr = { 59 + .ctx_dma = FALCON_DMAIDX_UCODE, 60 + .code_dma_base = lower_32_bits(code), 61 + .non_sec_code_off = lsfw->app_resident_code_offset, 62 + .non_sec_code_size = lsfw->app_resident_code_size, 63 + .code_entry_point = lsfw->app_imem_entry, 64 + .data_dma_base = lower_32_bits(data), 65 + .data_size = lsfw->app_resident_data_size, 66 + .code_dma_base1 = upper_32_bits(code), 67 + .data_dma_base1 = upper_32_bits(data), 68 + }; 69 + 70 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 71 + } 72 + 73 + const struct nvkm_acr_lsf_func 74 + gm20b_gr_fecs_acr = { 75 + .bld_size = sizeof(struct flcn_bl_dmem_desc), 76 + .bld_write = gm20b_gr_acr_bld_write, 77 + .bld_patch = gm20b_gr_acr_bld_patch, 78 + }; 28 79 29 80 
static void 30 81 gm20b_gr_init_gpc_mmu(struct gf100_gr *gr) ··· 84 33 u32 val; 85 34 86 35 /* Bypass MMU check for non-secure boot */ 87 - if (!device->secboot) { 36 + if (!device->acr) { 88 37 nvkm_wr32(device, 0x100ce4, 0xffffffff); 89 38 90 39 if (nvkm_rd32(device, 0x100ce4) != 0xffffffff) ··· 136 85 } 137 86 }; 138 87 88 + static int 89 + gm20b_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) 90 + { 91 + struct nvkm_subdev *subdev = &gr->base.engine.subdev; 92 + int ret; 93 + 94 + ret = nvkm_acr_lsfw_load_bl_inst_data_sig(subdev, &gr->fecs.falcon, 95 + NVKM_ACR_LSF_FECS, 96 + "gr/fecs_", ver, fwif->fecs); 97 + if (ret) 98 + return ret; 99 + 100 + 101 + if (nvkm_firmware_load_blob(subdev, "gr/", "gpccs_inst", ver, 102 + &gr->gpccs.inst) || 103 + nvkm_firmware_load_blob(subdev, "gr/", "gpccs_data", ver, 104 + &gr->gpccs.data)) 105 + return -ENOENT; 106 + 107 + gr->firmware = true; 108 + 109 + return gk20a_gr_load_sw(gr, "gr/", ver); 110 + } 111 + 112 + #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 113 + MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); 114 + MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin"); 115 + MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin"); 116 + MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin"); 117 + MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin"); 118 + MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin"); 119 + MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin"); 120 + MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); 121 + MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); 122 + MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); 123 + #endif 124 + 125 + static const struct gf100_gr_fwif 126 + gm20b_gr_fwif[] = { 127 + { 0, gm20b_gr_load, &gm20b_gr, &gm20b_gr_fecs_acr }, 128 + {} 129 + }; 130 + 139 131 int 140 132 gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 141 133 { 142 - return gm200_gr_new_(&gm20b_gr, device, index, pgr); 134 + return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr); 143 
135 }
+20 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
··· 135 135 } 136 136 }; 137 137 138 + MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); 139 + MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); 140 + MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); 141 + MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); 142 + MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); 143 + MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); 144 + MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); 145 + MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); 146 + MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); 147 + MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); 148 + MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); 149 + MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); 150 + 151 + static const struct gf100_gr_fwif 152 + gp100_gr_fwif[] = { 153 + { 0, gm200_gr_load, &gp100_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, 154 + {} 155 + }; 156 + 138 157 int 139 158 gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 140 159 { 141 - return gm200_gr_new_(&gp100_gr, device, index, pgr); 160 + return gf100_gr_new_(gp100_gr_fwif, device, index, pgr); 142 161 }
+20 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
··· 131 131 } 132 132 }; 133 133 134 + MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin"); 135 + MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin"); 136 + MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin"); 137 + MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin"); 138 + MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin"); 139 + MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin"); 140 + MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin"); 141 + MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin"); 142 + MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin"); 143 + MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin"); 144 + MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin"); 145 + MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin"); 146 + 147 + static const struct gf100_gr_fwif 148 + gp102_gr_fwif[] = { 149 + { 0, gm200_gr_load, &gp102_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, 150 + {} 151 + }; 152 + 134 153 int 135 154 gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 136 155 { 137 - return gm200_gr_new_(&gp102_gr, device, index, pgr); 156 + return gf100_gr_new_(gp102_gr_fwif, device, index, pgr); 138 157 }
+33 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c
··· 59 59 } 60 60 }; 61 61 62 + MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin"); 63 + MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin"); 64 + MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin"); 65 + MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin"); 66 + MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin"); 67 + MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin"); 68 + MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin"); 69 + MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin"); 70 + MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin"); 71 + MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin"); 72 + MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin"); 73 + MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin"); 74 + 75 + MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin"); 76 + MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin"); 77 + MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin"); 78 + MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin"); 79 + MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin"); 80 + MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin"); 81 + MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin"); 82 + MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin"); 83 + MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin"); 84 + MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin"); 85 + MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin"); 86 + MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin"); 87 + 88 + static const struct gf100_gr_fwif 89 + gp104_gr_fwif[] = { 90 + { 0, gm200_gr_load, &gp104_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, 91 + {} 92 + }; 93 + 62 94 int 63 95 gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 64 96 { 65 - return gm200_gr_new_(&gp104_gr, device, index, pgr); 97 + return gf100_gr_new_(gp104_gr_fwif, device, index, pgr); 66 98 }
+21 -2
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
··· 26 26 27 27 #include <nvif/class.h> 28 28 29 - static const struct gf100_gr_func 29 + const struct gf100_gr_func 30 30 gp107_gr = { 31 31 .oneinit_tiles = gm200_gr_oneinit_tiles, 32 32 .oneinit_sm_id = gm200_gr_oneinit_sm_id, ··· 61 61 } 62 62 }; 63 63 64 + MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin"); 65 + MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin"); 66 + MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin"); 67 + MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin"); 68 + MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin"); 69 + MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin"); 70 + MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin"); 71 + MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin"); 72 + MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin"); 73 + MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin"); 74 + MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin"); 75 + MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin"); 76 + 77 + static const struct gf100_gr_fwif 78 + gp107_gr_fwif[] = { 79 + { 0, gm200_gr_load, &gp107_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr }, 80 + {} 81 + }; 82 + 64 83 int 65 84 gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 66 85 { 67 - return gm200_gr_new_(&gp107_gr, device, index, pgr); 86 + return gf100_gr_new_(gp107_gr_fwif, device, index, pgr); 68 87 }
+97
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "gf100.h" 23 + 24 + #include <subdev/acr.h> 25 + 26 + #include <nvfw/flcn.h> 27 + 28 + static void 29 + gp108_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) 30 + { 31 + struct flcn_bl_dmem_desc_v2 hdr; 32 + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); 33 + hdr.code_dma_base = hdr.code_dma_base + adjust; 34 + hdr.data_dma_base = hdr.data_dma_base + adjust; 35 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 36 + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr); 37 + } 38 + 39 + static void 40 + gp108_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld, 41 + struct nvkm_acr_lsfw *lsfw) 42 + { 43 + const u64 base = lsfw->offset.img + lsfw->app_start_offset; 44 + const u64 code = base + lsfw->app_resident_code_offset; 45 + const u64 data = base + lsfw->app_resident_data_offset; 46 + const struct flcn_bl_dmem_desc_v2 hdr = { 47 + .ctx_dma = FALCON_DMAIDX_UCODE, 48 + .code_dma_base = code, 49 + .non_sec_code_off = lsfw->app_resident_code_offset, 50 + .non_sec_code_size = lsfw->app_resident_code_size, 51 + .code_entry_point = lsfw->app_imem_entry, 52 + .data_dma_base = data, 53 + .data_size = lsfw->app_resident_data_size, 54 + }; 55 + 56 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 57 + } 58 + 59 + const struct nvkm_acr_lsf_func 60 + gp108_gr_gpccs_acr = { 61 + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, 62 + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), 63 + .bld_write = gp108_gr_acr_bld_write, 64 + .bld_patch = gp108_gr_acr_bld_patch, 65 + }; 66 + 67 + const struct nvkm_acr_lsf_func 68 + gp108_gr_fecs_acr = { 69 + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), 70 + .bld_write = gp108_gr_acr_bld_write, 71 + .bld_patch = gp108_gr_acr_bld_patch, 72 + }; 73 + 74 + MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin"); 75 + MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin"); 76 + MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin"); 77 + MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin"); 78 + MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin"); 79 + 
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin"); 80 + MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin"); 81 + MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin"); 82 + MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin"); 83 + MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin"); 84 + MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin"); 85 + MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin"); 86 + 87 + static const struct gf100_gr_fwif 88 + gp108_gr_fwif[] = { 89 + { 0, gm200_gr_load, &gp107_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, 90 + {} 91 + }; 92 + 93 + int 94 + gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 95 + { 96 + return gf100_gr_new_(gp108_gr_fwif, device, index, pgr); 97 + }
+34 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
··· 23 23 #include "gf100.h" 24 24 #include "ctxgf100.h" 25 25 26 + #include <subdev/acr.h> 27 + 26 28 #include <nvif/class.h> 29 + 30 + #include <nvfw/flcn.h> 31 + 32 + static const struct nvkm_acr_lsf_func 33 + gp10b_gr_gpccs_acr = { 34 + .flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD, 35 + .bld_size = sizeof(struct flcn_bl_dmem_desc), 36 + .bld_write = gm20b_gr_acr_bld_write, 37 + .bld_patch = gm20b_gr_acr_bld_patch, 38 + }; 27 39 28 40 static const struct gf100_gr_func 29 41 gp10b_gr = { ··· 71 59 } 72 60 }; 73 61 62 + #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) 63 + MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); 64 + MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); 65 + MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); 66 + MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); 67 + MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); 68 + MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); 69 + MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); 70 + MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); 71 + MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); 72 + MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); 73 + MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); 74 + MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); 75 + #endif 76 + 77 + static const struct gf100_gr_fwif 78 + gp10b_gr_fwif[] = { 79 + { 0, gm200_gr_load, &gp10b_gr, &gm20b_gr_fecs_acr, &gp10b_gr_gpccs_acr }, 80 + {} 81 + }; 82 + 74 83 int 75 84 gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 76 85 { 77 - return gm200_gr_new_(&gp10b_gr, device, index, pgr); 86 + return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr); 78 87 }
+24 -5
drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
··· 45 45 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr); 46 46 } 47 47 48 - static void 48 + void 49 49 gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) 50 50 { 51 51 gv100_gr_trap_sm(gr, gpc, tpc, 0); ··· 59 59 nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000); 60 60 } 61 61 62 - static void 62 + void 63 63 gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc) 64 64 { 65 65 struct nvkm_device *device = gr->base.engine.subdev.device; ··· 71 71 } 72 72 } 73 73 74 - static void 74 + void 75 75 gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc) 76 76 { 77 77 struct nvkm_device *device = gr->base.engine.subdev.device; 78 78 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000); 79 79 } 80 80 81 - static void 81 + void 82 82 gv100_gr_init_419bd8(struct gf100_gr *gr) 83 83 { 84 84 struct nvkm_device *device = gr->base.engine.subdev.device; ··· 120 120 } 121 121 }; 122 122 123 + MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin"); 124 + MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin"); 125 + MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin"); 126 + MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin"); 127 + MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin"); 128 + MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin"); 129 + MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin"); 130 + MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin"); 131 + MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin"); 132 + MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin"); 133 + MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin"); 134 + MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin"); 135 + 136 + static const struct gf100_gr_fwif 137 + gv100_gr_fwif[] = { 138 + { 0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, 139 + {} 140 + }; 141 + 123 142 int 124 143 gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 125 144 { 126 - return gm200_gr_new_(&gv100_gr, device, index, pgr); 145 + return gf100_gr_new_(gv100_gr_fwif, device, 
index, pgr); 127 146 }
+177
drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "gf100.h" 23 + #include "ctxgf100.h" 24 + 25 + #include <nvif/class.h> 26 + 27 + static void 28 + tu102_gr_init_fecs_exceptions(struct gf100_gr *gr) 29 + { 30 + nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, 0x006f0002); 31 + } 32 + 33 + static void 34 + tu102_gr_init_fs(struct gf100_gr *gr) 35 + { 36 + struct nvkm_device *device = gr->base.engine.subdev.device; 37 + int sm; 38 + 39 + gp100_grctx_generate_smid_config(gr); 40 + gk104_grctx_generate_gpc_tpc_nr(gr); 41 + 42 + for (sm = 0; sm < gr->sm_nr; sm++) { 43 + nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 + 44 + gr->sm[sm].tpc * 4), sm); 45 + } 46 + 47 + gm200_grctx_generate_dist_skip_table(gr); 48 + gf100_gr_init_num_tpc_per_gpc(gr, true, true); 49 + } 50 + 51 + static void 52 + tu102_gr_init_zcull(struct gf100_gr *gr) 53 + { 54 + struct nvkm_device *device = gr->base.engine.subdev.device; 55 + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); 56 + const u8 tile_nr = ALIGN(gr->tpc_total, 64); 57 + u8 bank[GPC_MAX] = {}, gpc, i, j; 58 + u32 data; 59 + 60 + for (i = 0; i < tile_nr; i += 8) { 61 + for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) { 62 + data |= bank[gr->tile[i + j]] << (j * 4); 63 + bank[gr->tile[i + j]]++; 64 + } 65 + nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data); 66 + } 67 + 68 + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 69 + nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), 70 + gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]); 71 + nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | 72 + gr->tpc_total); 73 + nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); 74 + } 75 + 76 + nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); 77 + } 78 + 79 + static void 80 + tu102_gr_init_gpc_mmu(struct gf100_gr *gr) 81 + { 82 + struct nvkm_device *device = gr->base.engine.subdev.device; 83 + 84 + nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf8001fff); 85 + nvkm_wr32(device, 0x418890, 0x00000000); 86 + nvkm_wr32(device, 0x418894, 
0x00000000); 87 + 88 + nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8)); 89 + nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc)); 90 + nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4)); 91 + } 92 + 93 + static const struct gf100_gr_func 94 + tu102_gr = { 95 + .oneinit_tiles = gm200_gr_oneinit_tiles, 96 + .oneinit_sm_id = gm200_gr_oneinit_sm_id, 97 + .init = gf100_gr_init, 98 + .init_419bd8 = gv100_gr_init_419bd8, 99 + .init_gpc_mmu = tu102_gr_init_gpc_mmu, 100 + .init_vsc_stream_master = gk104_gr_init_vsc_stream_master, 101 + .init_zcull = tu102_gr_init_zcull, 102 + .init_num_active_ltcs = gf100_gr_init_num_active_ltcs, 103 + .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, 104 + .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask, 105 + .init_fs = tu102_gr_init_fs, 106 + .init_fecs_exceptions = tu102_gr_init_fecs_exceptions, 107 + .init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2, 108 + .init_sked_hww_esr = gk104_gr_init_sked_hww_esr, 109 + .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, 110 + .init_504430 = gv100_gr_init_504430, 111 + .init_shader_exceptions = gv100_gr_init_shader_exceptions, 112 + .trap_mp = gv100_gr_trap_mp, 113 + .rops = gm200_gr_rops, 114 + .gpc_nr = 6, 115 + .tpc_nr = 5, 116 + .ppc_nr = 3, 117 + .grctx = &tu102_grctx, 118 + .zbc = &gp102_gr_zbc, 119 + .sclass = { 120 + { -1, -1, FERMI_TWOD_A }, 121 + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 122 + { -1, -1, TURING_A, &gf100_fermi }, 123 + { -1, -1, TURING_COMPUTE_A }, 124 + {} 125 + } 126 + }; 127 + 128 + MODULE_FIRMWARE("nvidia/tu102/gr/fecs_bl.bin"); 129 + MODULE_FIRMWARE("nvidia/tu102/gr/fecs_inst.bin"); 130 + MODULE_FIRMWARE("nvidia/tu102/gr/fecs_data.bin"); 131 + MODULE_FIRMWARE("nvidia/tu102/gr/fecs_sig.bin"); 132 + MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_bl.bin"); 133 + MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_inst.bin"); 134 + MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_data.bin"); 135 + MODULE_FIRMWARE("nvidia/tu102/gr/gpccs_sig.bin"); 136 + 
MODULE_FIRMWARE("nvidia/tu102/gr/sw_ctx.bin"); 137 + MODULE_FIRMWARE("nvidia/tu102/gr/sw_nonctx.bin"); 138 + MODULE_FIRMWARE("nvidia/tu102/gr/sw_bundle_init.bin"); 139 + MODULE_FIRMWARE("nvidia/tu102/gr/sw_method_init.bin"); 140 + 141 + MODULE_FIRMWARE("nvidia/tu104/gr/fecs_bl.bin"); 142 + MODULE_FIRMWARE("nvidia/tu104/gr/fecs_inst.bin"); 143 + MODULE_FIRMWARE("nvidia/tu104/gr/fecs_data.bin"); 144 + MODULE_FIRMWARE("nvidia/tu104/gr/fecs_sig.bin"); 145 + MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_bl.bin"); 146 + MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_inst.bin"); 147 + MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_data.bin"); 148 + MODULE_FIRMWARE("nvidia/tu104/gr/gpccs_sig.bin"); 149 + MODULE_FIRMWARE("nvidia/tu104/gr/sw_ctx.bin"); 150 + MODULE_FIRMWARE("nvidia/tu104/gr/sw_nonctx.bin"); 151 + MODULE_FIRMWARE("nvidia/tu104/gr/sw_bundle_init.bin"); 152 + MODULE_FIRMWARE("nvidia/tu104/gr/sw_method_init.bin"); 153 + 154 + MODULE_FIRMWARE("nvidia/tu106/gr/fecs_bl.bin"); 155 + MODULE_FIRMWARE("nvidia/tu106/gr/fecs_inst.bin"); 156 + MODULE_FIRMWARE("nvidia/tu106/gr/fecs_data.bin"); 157 + MODULE_FIRMWARE("nvidia/tu106/gr/fecs_sig.bin"); 158 + MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_bl.bin"); 159 + MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_inst.bin"); 160 + MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_data.bin"); 161 + MODULE_FIRMWARE("nvidia/tu106/gr/gpccs_sig.bin"); 162 + MODULE_FIRMWARE("nvidia/tu106/gr/sw_ctx.bin"); 163 + MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin"); 164 + MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin"); 165 + MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin"); 166 + 167 + static const struct gf100_gr_fwif 168 + tu102_gr_fwif[] = { 169 + { 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr }, 170 + {} 171 + }; 172 + 173 + int 174 + tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 175 + { 176 + return gf100_gr_new_(tu102_gr_fwif, device, index, pgr); 177 + }
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
··· 1 1 # SPDX-License-Identifier: MIT 2 2 nvkm-y += nvkm/engine/nvdec/base.o 3 - nvkm-y += nvkm/engine/nvdec/gp102.o 3 + nvkm-y += nvkm/engine/nvdec/gm107.o
+18 -24
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
··· 20 20 * DEALINGS IN THE SOFTWARE. 21 21 */ 22 22 #include "priv.h" 23 - 24 - #include <subdev/top.h> 25 - #include <engine/falcon.h> 26 - 27 - static int 28 - nvkm_nvdec_oneinit(struct nvkm_engine *engine) 29 - { 30 - struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); 31 - struct nvkm_subdev *subdev = &nvdec->engine.subdev; 32 - 33 - nvdec->addr = nvkm_top_addr(subdev->device, subdev->index); 34 - if (!nvdec->addr) 35 - return -EINVAL; 36 - 37 - /*XXX: fix naming of this when adding support for multiple-NVDEC */ 38 - return nvkm_falcon_v1_new(subdev, "NVDEC", nvdec->addr, 39 - &nvdec->falcon); 40 - } 23 + #include <core/firmware.h> 41 24 42 25 static void * 43 26 nvkm_nvdec_dtor(struct nvkm_engine *engine) 44 27 { 45 28 struct nvkm_nvdec *nvdec = nvkm_nvdec(engine); 46 - nvkm_falcon_del(&nvdec->falcon); 29 + nvkm_falcon_dtor(&nvdec->falcon); 47 30 return nvdec; 48 31 } 49 32 50 33 static const struct nvkm_engine_func 51 34 nvkm_nvdec = { 52 35 .dtor = nvkm_nvdec_dtor, 53 - .oneinit = nvkm_nvdec_oneinit, 54 36 }; 55 37 56 38 int 57 - nvkm_nvdec_new_(struct nvkm_device *device, int index, 58 - struct nvkm_nvdec **pnvdec) 39 + nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device, 40 + int index, struct nvkm_nvdec **pnvdec) 59 41 { 60 42 struct nvkm_nvdec *nvdec; 43 + int ret; 61 44 62 45 if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL))) 63 46 return -ENOMEM; 64 47 65 - return nvkm_engine_ctor(&nvkm_nvdec, device, index, true, 66 - &nvdec->engine); 48 + ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true, 49 + &nvdec->engine); 50 + if (ret) 51 + return ret; 52 + 53 + fwif = nvkm_firmware_load(&nvdec->engine.subdev, fwif, "Nvdec", nvdec); 54 + if (IS_ERR(fwif)) 55 + return -ENODEV; 56 + 57 + nvdec->func = fwif->func; 58 + 59 + return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev, 60 + nvkm_subdev_name[index], 0, &nvdec->falcon); 67 61 };
+63
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
··· 1 + /* 2 + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 + * DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + static const struct nvkm_falcon_func 25 + gm107_nvdec_flcn = { 26 + .debug = 0xd00, 27 + .fbif = 0x600, 28 + .load_imem = nvkm_falcon_v1_load_imem, 29 + .load_dmem = nvkm_falcon_v1_load_dmem, 30 + .read_dmem = nvkm_falcon_v1_read_dmem, 31 + .bind_context = nvkm_falcon_v1_bind_context, 32 + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 33 + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 34 + .set_start_addr = nvkm_falcon_v1_set_start_addr, 35 + .start = nvkm_falcon_v1_start, 36 + .enable = nvkm_falcon_v1_enable, 37 + .disable = nvkm_falcon_v1_disable, 38 + }; 39 + 40 + static const struct nvkm_nvdec_func 41 + gm107_nvdec = { 42 + .flcn = &gm107_nvdec_flcn, 43 + }; 44 + 45 + static int 46 + gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver, 47 + const struct nvkm_nvdec_fwif *fwif) 48 + { 49 + return 0; 50 + } 51 + 52 + static const struct nvkm_nvdec_fwif 53 + gm107_nvdec_fwif[] = { 54 + { -1, gm107_nvdec_nofw, &gm107_nvdec }, 55 + {} 56 + }; 57 + 58 + int 59 + gm107_nvdec_new(struct nvkm_device *device, int index, 60 + struct nvkm_nvdec **pnvdec) 61 + { 62 + return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec); 63 + }
-30
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #include "priv.h" 24 - 25 - int 26 - gp102_nvdec_new(struct nvkm_device *device, int index, 27 - struct nvkm_nvdec **pnvdec) 28 - { 29 - return nvkm_nvdec_new_(device, index, pnvdec); 30 - }
+13 -1
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
··· 3 3 #define __NVKM_NVDEC_PRIV_H__ 4 4 #include <engine/nvdec.h> 5 5 6 - int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **); 6 + struct nvkm_nvdec_func { 7 + const struct nvkm_falcon_func *flcn; 8 + }; 9 + 10 + struct nvkm_nvdec_fwif { 11 + int version; 12 + int (*load)(struct nvkm_nvdec *, int ver, 13 + const struct nvkm_nvdec_fwif *); 14 + const struct nvkm_nvdec_func *func; 15 + }; 16 + 17 + int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, 18 + struct nvkm_device *, int, struct nvkm_nvdec **); 7 19 #endif
+2 -1
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
··· 1 1 # SPDX-License-Identifier: MIT 2 - #nvkm-y += nvkm/engine/nvenc/base.o 2 + nvkm-y += nvkm/engine/nvenc/base.o 3 + nvkm-y += nvkm/engine/nvenc/gm107.o
+63
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + #include "priv.h" 25 + #include <core/firmware.h> 26 + 27 + static void * 28 + nvkm_nvenc_dtor(struct nvkm_engine *engine) 29 + { 30 + struct nvkm_nvenc *nvenc = nvkm_nvenc(engine); 31 + nvkm_falcon_dtor(&nvenc->falcon); 32 + return nvenc; 33 + } 34 + 35 + static const struct nvkm_engine_func 36 + nvkm_nvenc = { 37 + .dtor = nvkm_nvenc_dtor, 38 + }; 39 + 40 + int 41 + nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device, 42 + int index, struct nvkm_nvenc **pnvenc) 43 + { 44 + struct nvkm_nvenc *nvenc; 45 + int ret; 46 + 47 + if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL))) 48 + return -ENOMEM; 49 + 50 + ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true, 51 + &nvenc->engine); 52 + if (ret) 53 + return ret; 54 + 55 + fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc); 56 + if (IS_ERR(fwif)) 57 + return -ENODEV; 58 + 59 + nvenc->func = fwif->func; 60 + 61 + return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev, 62 + nvkm_subdev_name[index], 0, &nvenc->falcon); 63 + };
+63
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + 23 + #include "priv.h" 24 + 25 + static const struct nvkm_falcon_func 26 + gm107_nvenc_flcn = { 27 + .fbif = 0x800, 28 + .load_imem = nvkm_falcon_v1_load_imem, 29 + .load_dmem = nvkm_falcon_v1_load_dmem, 30 + .read_dmem = nvkm_falcon_v1_read_dmem, 31 + .bind_context = nvkm_falcon_v1_bind_context, 32 + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 33 + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 34 + .set_start_addr = nvkm_falcon_v1_set_start_addr, 35 + .start = nvkm_falcon_v1_start, 36 + .enable = nvkm_falcon_v1_enable, 37 + .disable = nvkm_falcon_v1_disable, 38 + }; 39 + 40 + static const struct nvkm_nvenc_func 41 + gm107_nvenc = { 42 + .flcn = &gm107_nvenc_flcn, 43 + }; 44 + 45 + static int 46 + gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver, 47 + const struct nvkm_nvenc_fwif *fwif) 48 + { 49 + return 0; 50 + } 51 + 52 + static const struct nvkm_nvenc_fwif 53 + gm107_nvenc_fwif[] = { 54 + { -1, gm107_nvenc_nofw, &gm107_nvenc }, 55 + {} 56 + }; 57 + 58 + int 59 + gm107_nvenc_new(struct nvkm_device *device, int index, 60 + struct nvkm_nvenc **pnvenc) 61 + { 62 + return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc); 63 + }
+19
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVKM_NVENC_PRIV_H__ 3 + #define __NVKM_NVENC_PRIV_H__ 4 + #include <engine/nvenc.h> 5 + 6 + struct nvkm_nvenc_func { 7 + const struct nvkm_falcon_func *flcn; 8 + }; 9 + 10 + struct nvkm_nvenc_fwif { 11 + int version; 12 + int (*load)(struct nvkm_nvenc *, int ver, 13 + const struct nvkm_nvenc_fwif *); 14 + const struct nvkm_nvenc_func *func; 15 + }; 16 + 17 + int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, 18 + int, struct nvkm_nvenc **pnvenc); 19 + #endif
+1
drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
··· 1 1 # SPDX-License-Identifier: MIT 2 2 nvkm-y += nvkm/engine/sec2/base.o 3 3 nvkm-y += nvkm/engine/sec2/gp102.o 4 + nvkm-y += nvkm/engine/sec2/gp108.o 4 5 nvkm-y += nvkm/engine/sec2/tu102.o
+57 -56
drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
··· 21 21 */ 22 22 #include "priv.h" 23 23 24 - #include <core/msgqueue.h> 24 + #include <core/firmware.h> 25 25 #include <subdev/top.h> 26 - #include <engine/falcon.h> 27 - 28 - static void * 29 - nvkm_sec2_dtor(struct nvkm_engine *engine) 30 - { 31 - struct nvkm_sec2 *sec2 = nvkm_sec2(engine); 32 - nvkm_msgqueue_del(&sec2->queue); 33 - nvkm_falcon_del(&sec2->falcon); 34 - return sec2; 35 - } 36 - 37 - static void 38 - nvkm_sec2_intr(struct nvkm_engine *engine) 39 - { 40 - struct nvkm_sec2 *sec2 = nvkm_sec2(engine); 41 - struct nvkm_subdev *subdev = &engine->subdev; 42 - struct nvkm_device *device = subdev->device; 43 - u32 disp = nvkm_rd32(device, sec2->addr + 0x01c); 44 - u32 intr = nvkm_rd32(device, sec2->addr + 0x008) & disp & ~(disp >> 16); 45 - 46 - if (intr & 0x00000040) { 47 - schedule_work(&sec2->work); 48 - nvkm_wr32(device, sec2->addr + 0x004, 0x00000040); 49 - intr &= ~0x00000040; 50 - } 51 - 52 - if (intr) { 53 - nvkm_error(subdev, "unhandled intr %08x\n", intr); 54 - nvkm_wr32(device, sec2->addr + 0x004, intr); 55 - 56 - } 57 - } 58 26 59 27 static void 60 28 nvkm_sec2_recv(struct work_struct *work) 61 29 { 62 30 struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work); 63 31 64 - if (!sec2->queue) { 65 - nvkm_warn(&sec2->engine.subdev, 66 - "recv function called while no firmware set!\n"); 67 - return; 32 + if (!sec2->initmsg_received) { 33 + int ret = sec2->func->initmsg(sec2); 34 + if (ret) { 35 + nvkm_error(&sec2->engine.subdev, 36 + "error parsing init message: %d\n", ret); 37 + return; 38 + } 39 + 40 + sec2->initmsg_received = true; 68 41 } 69 42 70 - nvkm_msgqueue_recv(sec2->queue); 43 + nvkm_falcon_msgq_recv(sec2->msgq); 71 44 } 72 45 73 - 74 - static int 75 - nvkm_sec2_oneinit(struct nvkm_engine *engine) 46 + static void 47 + nvkm_sec2_intr(struct nvkm_engine *engine) 76 48 { 77 49 struct nvkm_sec2 *sec2 = nvkm_sec2(engine); 78 - struct nvkm_subdev *subdev = &sec2->engine.subdev; 79 - 80 - if (!sec2->addr) { 81 - sec2->addr = 
nvkm_top_addr(subdev->device, subdev->index); 82 - if (WARN_ON(!sec2->addr)) 83 - return -EINVAL; 84 - } 85 - 86 - return nvkm_falcon_v1_new(subdev, "SEC2", sec2->addr, &sec2->falcon); 50 + sec2->func->intr(sec2); 87 51 } 88 52 89 53 static int 90 54 nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend) 91 55 { 92 56 struct nvkm_sec2 *sec2 = nvkm_sec2(engine); 57 + 93 58 flush_work(&sec2->work); 59 + 60 + if (suspend) { 61 + nvkm_falcon_cmdq_fini(sec2->cmdq); 62 + sec2->initmsg_received = false; 63 + } 64 + 94 65 return 0; 66 + } 67 + 68 + static void * 69 + nvkm_sec2_dtor(struct nvkm_engine *engine) 70 + { 71 + struct nvkm_sec2 *sec2 = nvkm_sec2(engine); 72 + nvkm_falcon_msgq_del(&sec2->msgq); 73 + nvkm_falcon_cmdq_del(&sec2->cmdq); 74 + nvkm_falcon_qmgr_del(&sec2->qmgr); 75 + nvkm_falcon_dtor(&sec2->falcon); 76 + return sec2; 95 77 } 96 78 97 79 static const struct nvkm_engine_func 98 80 nvkm_sec2 = { 99 81 .dtor = nvkm_sec2_dtor, 100 - .oneinit = nvkm_sec2_oneinit, 101 82 .fini = nvkm_sec2_fini, 102 83 .intr = nvkm_sec2_intr, 103 84 }; 104 85 105 86 int 106 - nvkm_sec2_new_(struct nvkm_device *device, int index, u32 addr, 107 - struct nvkm_sec2 **psec2) 87 + nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device, 88 + int index, u32 addr, struct nvkm_sec2 **psec2) 108 89 { 109 90 struct nvkm_sec2 *sec2; 91 + int ret; 110 92 111 93 if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL))) 112 94 return -ENOMEM; 113 - sec2->addr = addr; 114 - INIT_WORK(&sec2->work, nvkm_sec2_recv); 115 95 116 - return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine); 96 + ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine); 97 + if (ret) 98 + return ret; 99 + 100 + fwif = nvkm_firmware_load(&sec2->engine.subdev, fwif, "Sec2", sec2); 101 + if (IS_ERR(fwif)) 102 + return PTR_ERR(fwif); 103 + 104 + sec2->func = fwif->func; 105 + 106 + ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev, 107 + 
nvkm_subdev_name[index], addr, &sec2->falcon); 108 + if (ret) 109 + return ret; 110 + 111 + if ((ret = nvkm_falcon_qmgr_new(&sec2->falcon, &sec2->qmgr)) || 112 + (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) || 113 + (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq))) 114 + return ret; 115 + 116 + INIT_WORK(&sec2->work, nvkm_sec2_recv); 117 + return 0; 117 118 };
+309 -5
drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
··· 19 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 20 * DEALINGS IN THE SOFTWARE. 21 21 */ 22 - 23 22 #include "priv.h" 24 23 25 - int 26 - gp102_sec2_new(struct nvkm_device *device, int index, 27 - struct nvkm_sec2 **psec2) 24 + #include <core/memory.h> 25 + #include <subdev/acr.h> 26 + #include <subdev/timer.h> 27 + 28 + #include <nvfw/flcn.h> 29 + #include <nvfw/sec2.h> 30 + 31 + static int 32 + gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr) 28 33 { 29 - return nvkm_sec2_new_(device, index, 0, psec2); 34 + struct nv_sec2_acr_bootstrap_falcon_msg *msg = 35 + container_of(hdr, typeof(*msg), msg.hdr); 36 + struct nvkm_subdev *subdev = priv; 37 + const char *name = nvkm_acr_lsf_id(msg->falcon_id); 38 + 39 + if (msg->error_code) { 40 + nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for " 41 + "falcon %d [%s]: %08x\n", 42 + msg->falcon_id, name, msg->error_code); 43 + return -EINVAL; 44 + } 45 + 46 + nvkm_debug(subdev, "%s booted\n", name); 47 + return 0; 48 + } 49 + 50 + static int 51 + gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon, 52 + enum nvkm_acr_lsf_id id) 53 + { 54 + struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon); 55 + struct nv_sec2_acr_bootstrap_falcon_cmd cmd = { 56 + .cmd.hdr.unit_id = sec2->func->unit_acr, 57 + .cmd.hdr.size = sizeof(cmd), 58 + .cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON, 59 + .flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, 60 + .falcon_id = id, 61 + }; 62 + 63 + return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr, 64 + gp102_sec2_acr_bootstrap_falcon_callback, 65 + &sec2->engine.subdev, 66 + msecs_to_jiffies(1000)); 67 + } 68 + 69 + static int 70 + gp102_sec2_acr_boot(struct nvkm_falcon *falcon) 71 + { 72 + struct nv_sec2_args args = {}; 73 + nvkm_falcon_load_dmem(falcon, &args, 74 + falcon->func->emem_addr, sizeof(args), 0); 75 + nvkm_falcon_start(falcon); 76 + return 0; 77 + } 78 + 79 + static void 80 + 
gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) 81 + { 82 + struct loader_config_v1 hdr; 83 + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); 84 + hdr.code_dma_base = hdr.code_dma_base + adjust; 85 + hdr.data_dma_base = hdr.data_dma_base + adjust; 86 + hdr.overlay_dma_base = hdr.overlay_dma_base + adjust; 87 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 88 + loader_config_v1_dump(&acr->subdev, &hdr); 89 + } 90 + 91 + static void 92 + gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld, 93 + struct nvkm_acr_lsfw *lsfw) 94 + { 95 + const struct loader_config_v1 hdr = { 96 + .dma_idx = FALCON_SEC2_DMAIDX_UCODE, 97 + .code_dma_base = lsfw->offset.img + lsfw->app_start_offset, 98 + .code_size_total = lsfw->app_size, 99 + .code_size_to_load = lsfw->app_resident_code_size, 100 + .code_entry_point = lsfw->app_imem_entry, 101 + .data_dma_base = lsfw->offset.img + lsfw->app_start_offset + 102 + lsfw->app_resident_data_offset, 103 + .data_size = lsfw->app_resident_data_size, 104 + .overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset, 105 + .argc = 1, 106 + .argv = lsfw->falcon->func->emem_addr, 107 + }; 108 + 109 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 110 + } 111 + 112 + static const struct nvkm_acr_lsf_func 113 + gp102_sec2_acr_0 = { 114 + .bld_size = sizeof(struct loader_config_v1), 115 + .bld_write = gp102_sec2_acr_bld_write, 116 + .bld_patch = gp102_sec2_acr_bld_patch, 117 + .boot = gp102_sec2_acr_boot, 118 + .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon, 119 + }; 120 + 121 + int 122 + gp102_sec2_initmsg(struct nvkm_sec2 *sec2) 123 + { 124 + struct nv_sec2_init_msg msg; 125 + int ret, i; 126 + 127 + ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg)); 128 + if (ret) 129 + return ret; 130 + 131 + if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT || 132 + msg.msg_type != NV_SEC2_INIT_MSG_INIT) 133 + return -EINVAL; 134 + 135 + for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) { 136 + if (msg.queue_info[i].id == 
NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) { 137 + nvkm_falcon_msgq_init(sec2->msgq, 138 + msg.queue_info[i].index, 139 + msg.queue_info[i].offset, 140 + msg.queue_info[i].size); 141 + } else { 142 + nvkm_falcon_cmdq_init(sec2->cmdq, 143 + msg.queue_info[i].index, 144 + msg.queue_info[i].offset, 145 + msg.queue_info[i].size); 146 + } 147 + } 148 + 149 + return 0; 150 + } 151 + 152 + void 153 + gp102_sec2_intr(struct nvkm_sec2 *sec2) 154 + { 155 + struct nvkm_subdev *subdev = &sec2->engine.subdev; 156 + struct nvkm_falcon *falcon = &sec2->falcon; 157 + u32 disp = nvkm_falcon_rd32(falcon, 0x01c); 158 + u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16); 159 + 160 + if (intr & 0x00000040) { 161 + schedule_work(&sec2->work); 162 + nvkm_falcon_wr32(falcon, 0x004, 0x00000040); 163 + intr &= ~0x00000040; 164 + } 165 + 166 + if (intr) { 167 + nvkm_error(subdev, "unhandled intr %08x\n", intr); 168 + nvkm_falcon_wr32(falcon, 0x004, intr); 169 + } 170 + } 171 + 172 + int 173 + gp102_sec2_flcn_enable(struct nvkm_falcon *falcon) 174 + { 175 + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001); 176 + udelay(10); 177 + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000); 178 + return nvkm_falcon_v1_enable(falcon); 179 + } 180 + 181 + void 182 + gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon, 183 + struct nvkm_memory *ctx) 184 + { 185 + struct nvkm_device *device = falcon->owner->device; 186 + 187 + nvkm_falcon_v1_bind_context(falcon, ctx); 188 + if (!ctx) 189 + return; 190 + 191 + /* Not sure if this is a WAR for a HW issue, or some additional 192 + * programming sequence that's needed to properly complete the 193 + * context switch we trigger above. 194 + * 195 + * Fixes unreliability of booting the SEC2 RTOS on Quadro P620, 196 + * particularly when resuming from suspend. 
197 + * 198 + * Also removes the need for an odd workaround where we needed 199 + * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before 200 + * the SEC2 RTOS would begin executing. 201 + */ 202 + nvkm_msec(device, 10, 203 + u32 irqstat = nvkm_falcon_rd32(falcon, 0x008); 204 + u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); 205 + if ((irqstat & 0x00000008) && 206 + (flcn0dc & 0x00007000) == 0x00005000) 207 + break; 208 + ); 209 + 210 + nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008); 211 + nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002); 212 + 213 + nvkm_msec(device, 10, 214 + u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); 215 + if ((flcn0dc & 0x00007000) == 0x00000000) 216 + break; 217 + ); 218 + } 219 + 220 + static const struct nvkm_falcon_func 221 + gp102_sec2_flcn = { 222 + .debug = 0x408, 223 + .fbif = 0x600, 224 + .load_imem = nvkm_falcon_v1_load_imem, 225 + .load_dmem = nvkm_falcon_v1_load_dmem, 226 + .read_dmem = nvkm_falcon_v1_read_dmem, 227 + .emem_addr = 0x01000000, 228 + .bind_context = gp102_sec2_flcn_bind_context, 229 + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 230 + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 231 + .set_start_addr = nvkm_falcon_v1_set_start_addr, 232 + .start = nvkm_falcon_v1_start, 233 + .enable = gp102_sec2_flcn_enable, 234 + .disable = nvkm_falcon_v1_disable, 235 + .cmdq = { 0xa00, 0xa04, 8 }, 236 + .msgq = { 0xa30, 0xa34, 8 }, 237 + }; 238 + 239 + const struct nvkm_sec2_func 240 + gp102_sec2 = { 241 + .flcn = &gp102_sec2_flcn, 242 + .unit_acr = NV_SEC2_UNIT_ACR, 243 + .intr = gp102_sec2_intr, 244 + .initmsg = gp102_sec2_initmsg, 245 + }; 246 + 247 + MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); 248 + MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); 249 + MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); 250 + MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); 251 + MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); 252 + MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); 253 + 
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); 254 + MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); 255 + MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); 256 + MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); 257 + MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); 258 + MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); 259 + 260 + static void 261 + gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust) 262 + { 263 + struct flcn_bl_dmem_desc_v2 hdr; 264 + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); 265 + hdr.code_dma_base = hdr.code_dma_base + adjust; 266 + hdr.data_dma_base = hdr.data_dma_base + adjust; 267 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 268 + flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr); 269 + } 270 + 271 + static void 272 + gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld, 273 + struct nvkm_acr_lsfw *lsfw) 274 + { 275 + const struct flcn_bl_dmem_desc_v2 hdr = { 276 + .ctx_dma = FALCON_SEC2_DMAIDX_UCODE, 277 + .code_dma_base = lsfw->offset.img + lsfw->app_start_offset, 278 + .non_sec_code_off = lsfw->app_resident_code_offset, 279 + .non_sec_code_size = lsfw->app_resident_code_size, 280 + .code_entry_point = lsfw->app_imem_entry, 281 + .data_dma_base = lsfw->offset.img + lsfw->app_start_offset + 282 + lsfw->app_resident_data_offset, 283 + .data_size = lsfw->app_resident_data_size, 284 + .argc = 1, 285 + .argv = lsfw->falcon->func->emem_addr, 286 + }; 287 + 288 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 289 + } 290 + 291 + const struct nvkm_acr_lsf_func 292 + gp102_sec2_acr_1 = { 293 + .bld_size = sizeof(struct flcn_bl_dmem_desc_v2), 294 + .bld_write = gp102_sec2_acr_bld_write_1, 295 + .bld_patch = gp102_sec2_acr_bld_patch_1, 296 + .boot = gp102_sec2_acr_boot, 297 + .bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon, 298 + }; 299 + 300 + int 301 + gp102_sec2_load(struct nvkm_sec2 *sec2, int ver, 302 + const struct nvkm_sec2_fwif *fwif) 303 + { 304 + return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev, 305 + 
&sec2->falcon, 306 + NVKM_ACR_LSF_SEC2, "sec2/", 307 + ver, fwif->acr); 308 + } 309 + 310 + MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); 311 + MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); 312 + MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); 313 + MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); 314 + MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); 315 + MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); 316 + MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); 317 + MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); 318 + MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); 319 + MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); 320 + MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); 321 + MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin"); 322 + 323 + static const struct nvkm_sec2_fwif 324 + gp102_sec2_fwif[] = { 325 + { 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 }, 326 + { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 }, 327 + {} 328 + }; 329 + 330 + int 331 + gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) 332 + { 333 + return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2); 30 334 }
+39
drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + */ 22 + #include "priv.h" 23 + #include <subdev/acr.h> 24 + 25 + MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); 26 + MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); 27 + MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); 28 + 29 + static const struct nvkm_sec2_fwif 30 + gp108_sec2_fwif[] = { 31 + { 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 }, 32 + {} 33 + }; 34 + 35 + int 36 + gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) 37 + { 38 + return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2); 39 + }
+22 -2
drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
··· 3 3 #define __NVKM_SEC2_PRIV_H__ 4 4 #include <engine/sec2.h> 5 5 6 - #define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine) 6 + struct nvkm_sec2_func { 7 + const struct nvkm_falcon_func *flcn; 8 + u8 unit_acr; 9 + void (*intr)(struct nvkm_sec2 *); 10 + int (*initmsg)(struct nvkm_sec2 *); 11 + }; 7 12 8 - int nvkm_sec2_new_(struct nvkm_device *, int, u32 addr, struct nvkm_sec2 **); 13 + void gp102_sec2_intr(struct nvkm_sec2 *); 14 + int gp102_sec2_initmsg(struct nvkm_sec2 *); 15 + 16 + struct nvkm_sec2_fwif { 17 + int version; 18 + int (*load)(struct nvkm_sec2 *, int ver, const struct nvkm_sec2_fwif *); 19 + const struct nvkm_sec2_func *func; 20 + const struct nvkm_acr_lsf_func *acr; 21 + }; 22 + 23 + int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *); 24 + extern const struct nvkm_sec2_func gp102_sec2; 25 + extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1; 26 + 27 + int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, 28 + int, u32 addr, struct nvkm_sec2 **); 9 29 #endif
+43 -4
drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
··· 19 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 20 * OTHER DEALINGS IN THE SOFTWARE. 21 21 */ 22 - 23 22 #include "priv.h" 23 + #include <subdev/acr.h> 24 + 25 + static const struct nvkm_falcon_func 26 + tu102_sec2_flcn = { 27 + .debug = 0x408, 28 + .fbif = 0x600, 29 + .load_imem = nvkm_falcon_v1_load_imem, 30 + .load_dmem = nvkm_falcon_v1_load_dmem, 31 + .read_dmem = nvkm_falcon_v1_read_dmem, 32 + .emem_addr = 0x01000000, 33 + .bind_context = gp102_sec2_flcn_bind_context, 34 + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 35 + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 36 + .set_start_addr = nvkm_falcon_v1_set_start_addr, 37 + .start = nvkm_falcon_v1_start, 38 + .enable = nvkm_falcon_v1_enable, 39 + .disable = nvkm_falcon_v1_disable, 40 + .cmdq = { 0xc00, 0xc04, 8 }, 41 + .msgq = { 0xc80, 0xc84, 8 }, 42 + }; 43 + 44 + static const struct nvkm_sec2_func 45 + tu102_sec2 = { 46 + .flcn = &tu102_sec2_flcn, 47 + .unit_acr = 0x07, 48 + .intr = gp102_sec2_intr, 49 + .initmsg = gp102_sec2_initmsg, 50 + }; 51 + 52 + static int 53 + tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver, 54 + const struct nvkm_sec2_fwif *fwif) 55 + { 56 + return 0; 57 + } 58 + 59 + static const struct nvkm_sec2_fwif 60 + tu102_sec2_fwif[] = { 61 + { 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 }, 62 + { -1, tu102_sec2_nofw, &tu102_sec2 } 63 + }; 24 64 25 65 int 26 - tu102_sec2_new(struct nvkm_device *device, int index, 27 - struct nvkm_sec2 **psec2) 66 + tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) 28 67 { 29 68 /* TOP info wasn't updated on Turing to reflect the PRI 30 69 * address change for some reason. We override it here. 31 70 */ 32 - return nvkm_sec2_new_(device, index, 0x840000, psec2); 71 + return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2); 33 72 }
+3 -3
drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
··· 1 1 # SPDX-License-Identifier: MIT 2 2 nvkm-y += nvkm/falcon/base.o 3 + nvkm-y += nvkm/falcon/cmdq.o 4 + nvkm-y += nvkm/falcon/msgq.o 5 + nvkm-y += nvkm/falcon/qmgr.o 3 6 nvkm-y += nvkm/falcon/v1.o 4 - nvkm-y += nvkm/falcon/msgqueue.o 5 - nvkm-y += nvkm/falcon/msgqueue_0137c63d.o 6 - nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
+44 -43
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
··· 22 22 #include "priv.h" 23 23 24 24 #include <subdev/mc.h> 25 + #include <subdev/top.h> 25 26 26 27 void 27 28 nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, ··· 135 134 return falcon->func->clear_interrupt(falcon, mask); 136 135 } 137 136 137 + static int 138 + nvkm_falcon_oneinit(struct nvkm_falcon *falcon) 139 + { 140 + const struct nvkm_falcon_func *func = falcon->func; 141 + const struct nvkm_subdev *subdev = falcon->owner; 142 + u32 reg; 143 + 144 + if (!falcon->addr) { 145 + falcon->addr = nvkm_top_addr(subdev->device, subdev->index); 146 + if (WARN_ON(!falcon->addr)) 147 + return -ENODEV; 148 + } 149 + 150 + reg = nvkm_falcon_rd32(falcon, 0x12c); 151 + falcon->version = reg & 0xf; 152 + falcon->secret = (reg >> 4) & 0x3; 153 + falcon->code.ports = (reg >> 8) & 0xf; 154 + falcon->data.ports = (reg >> 12) & 0xf; 155 + 156 + reg = nvkm_falcon_rd32(falcon, 0x108); 157 + falcon->code.limit = (reg & 0x1ff) << 8; 158 + falcon->data.limit = (reg & 0x3fe00) >> 1; 159 + 160 + if (func->debug) { 161 + u32 val = nvkm_falcon_rd32(falcon, func->debug); 162 + falcon->debug = (val >> 20) & 0x1; 163 + } 164 + 165 + return 0; 166 + } 167 + 138 168 void 139 169 nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) 140 170 { ··· 183 151 int 184 152 nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) 185 153 { 154 + int ret = 0; 155 + 186 156 mutex_lock(&falcon->mutex); 187 157 if (falcon->user) { 188 158 nvkm_error(user, "%s falcon already acquired by %s!\n", ··· 194 160 } 195 161 196 162 nvkm_debug(user, "acquired %s falcon\n", falcon->name); 163 + if (!falcon->oneinit) 164 + ret = nvkm_falcon_oneinit(falcon); 197 165 falcon->user = user; 198 166 mutex_unlock(&falcon->mutex); 199 - return 0; 167 + return ret; 200 168 } 201 169 202 170 void 171 + nvkm_falcon_dtor(struct nvkm_falcon *falcon) 172 + { 173 + } 174 + 175 + int 203 176 nvkm_falcon_ctor(const struct nvkm_falcon_func *func, 204 177 struct 
nvkm_subdev *subdev, const char *name, u32 addr, 205 178 struct nvkm_falcon *falcon) 206 179 { 207 - u32 debug_reg; 208 - u32 reg; 209 - 210 180 falcon->func = func; 211 181 falcon->owner = subdev; 212 182 falcon->name = name; 213 183 falcon->addr = addr; 214 184 mutex_init(&falcon->mutex); 215 185 mutex_init(&falcon->dmem_mutex); 216 - 217 - reg = nvkm_falcon_rd32(falcon, 0x12c); 218 - falcon->version = reg & 0xf; 219 - falcon->secret = (reg >> 4) & 0x3; 220 - falcon->code.ports = (reg >> 8) & 0xf; 221 - falcon->data.ports = (reg >> 12) & 0xf; 222 - 223 - reg = nvkm_falcon_rd32(falcon, 0x108); 224 - falcon->code.limit = (reg & 0x1ff) << 8; 225 - falcon->data.limit = (reg & 0x3fe00) >> 1; 226 - 227 - switch (subdev->index) { 228 - case NVKM_ENGINE_GR: 229 - debug_reg = 0x0; 230 - break; 231 - case NVKM_SUBDEV_PMU: 232 - debug_reg = 0xc08; 233 - break; 234 - case NVKM_ENGINE_NVDEC0: 235 - debug_reg = 0xd00; 236 - break; 237 - case NVKM_ENGINE_SEC2: 238 - debug_reg = 0x408; 239 - falcon->has_emem = true; 240 - break; 241 - case NVKM_SUBDEV_GSP: 242 - debug_reg = 0x0; /*XXX*/ 243 - break; 244 - default: 245 - nvkm_warn(subdev, "unsupported falcon %s!\n", 246 - nvkm_subdev_name[subdev->index]); 247 - debug_reg = 0; 248 - break; 249 - } 250 - 251 - if (debug_reg) { 252 - u32 val = nvkm_falcon_rd32(falcon, debug_reg); 253 - falcon->debug = (val >> 20) & 0x1; 254 - } 186 + return 0; 255 187 } 256 188 257 189 void 258 190 nvkm_falcon_del(struct nvkm_falcon **pfalcon) 259 191 { 260 192 if (*pfalcon) { 193 + nvkm_falcon_dtor(*pfalcon); 261 194 kfree(*pfalcon); 262 195 *pfalcon = NULL; 263 196 }
+214
drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
··· 1 + /* 2 + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #include "qmgr.h" 24 + 25 + static bool 26 + nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind) 27 + { 28 + u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg); 29 + u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg); 30 + u32 free; 31 + 32 + size = ALIGN(size, QUEUE_ALIGNMENT); 33 + 34 + if (head >= tail) { 35 + free = cmdq->offset + cmdq->size - head; 36 + free -= HDR_SIZE; 37 + 38 + if (size > free) { 39 + *rewind = true; 40 + head = cmdq->offset; 41 + } 42 + } 43 + 44 + if (head < tail) 45 + free = tail - head - 1; 46 + 47 + return size <= free; 48 + } 49 + 50 + static void 51 + nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size) 52 + { 53 + struct nvkm_falcon *falcon = cmdq->qmgr->falcon; 54 + nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0); 55 + cmdq->position += ALIGN(size, QUEUE_ALIGNMENT); 56 + } 57 + 58 + static void 59 + nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq) 60 + { 61 + struct nv_falcon_cmd cmd; 62 + 63 + cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND; 64 + cmd.size = sizeof(cmd); 65 + nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size); 66 + 67 + cmdq->position = cmdq->offset; 68 + } 69 + 70 + static int 71 + nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size) 72 + { 73 + struct nvkm_falcon *falcon = cmdq->qmgr->falcon; 74 + bool rewind = false; 75 + 76 + mutex_lock(&cmdq->mutex); 77 + 78 + if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) { 79 + FLCNQ_DBG(cmdq, "queue full"); 80 + mutex_unlock(&cmdq->mutex); 81 + return -EAGAIN; 82 + } 83 + 84 + cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg); 85 + 86 + if (rewind) 87 + nvkm_falcon_cmdq_rewind(cmdq); 88 + 89 + return 0; 90 + } 91 + 92 + static void 93 + nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq) 94 + { 95 + nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position); 96 + mutex_unlock(&cmdq->mutex); 97 + } 98 + 99 + static int 100 + 
nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd) 101 + { 102 + static unsigned timeout = 2000; 103 + unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); 104 + int ret = -EAGAIN; 105 + 106 + while (ret == -EAGAIN && time_before(jiffies, end_jiffies)) 107 + ret = nvkm_falcon_cmdq_open(cmdq, cmd->size); 108 + if (ret) { 109 + FLCNQ_ERR(cmdq, "timeout waiting for queue space"); 110 + return ret; 111 + } 112 + 113 + nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size); 114 + nvkm_falcon_cmdq_close(cmdq); 115 + return ret; 116 + } 117 + 118 + /* specifies that we want to know the command status in the answer message */ 119 + #define CMD_FLAGS_STATUS BIT(0) 120 + /* specifies that we want an interrupt when the answer message is queued */ 121 + #define CMD_FLAGS_INTR BIT(1) 122 + 123 + int 124 + nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd, 125 + nvkm_falcon_qmgr_callback cb, void *priv, 126 + unsigned long timeout) 127 + { 128 + struct nvkm_falcon_qmgr_seq *seq; 129 + int ret; 130 + 131 + if (!wait_for_completion_timeout(&cmdq->ready, 132 + msecs_to_jiffies(1000))) { 133 + FLCNQ_ERR(cmdq, "timeout waiting for queue ready"); 134 + return -ETIMEDOUT; 135 + } 136 + 137 + seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr); 138 + if (IS_ERR(seq)) 139 + return PTR_ERR(seq); 140 + 141 + cmd->seq_id = seq->id; 142 + cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR; 143 + 144 + seq->state = SEQ_STATE_USED; 145 + seq->async = !timeout; 146 + seq->callback = cb; 147 + seq->priv = priv; 148 + 149 + ret = nvkm_falcon_cmdq_write(cmdq, cmd); 150 + if (ret) { 151 + seq->state = SEQ_STATE_PENDING; 152 + nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq); 153 + return ret; 154 + } 155 + 156 + if (!seq->async) { 157 + if (!wait_for_completion_timeout(&seq->done, timeout)) { 158 + FLCNQ_ERR(cmdq, "timeout waiting for reply"); 159 + return -ETIMEDOUT; 160 + } 161 + ret = seq->result; 162 + nvkm_falcon_qmgr_seq_release(cmdq->qmgr, 
seq); 163 + } 164 + 165 + return ret; 166 + } 167 + 168 + void 169 + nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq) 170 + { 171 + reinit_completion(&cmdq->ready); 172 + } 173 + 174 + void 175 + nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq, 176 + u32 index, u32 offset, u32 size) 177 + { 178 + const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func; 179 + 180 + cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride; 181 + cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride; 182 + cmdq->offset = offset; 183 + cmdq->size = size; 184 + complete_all(&cmdq->ready); 185 + 186 + FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x", 187 + index, cmdq->offset, cmdq->size); 188 + } 189 + 190 + void 191 + nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq) 192 + { 193 + struct nvkm_falcon_cmdq *cmdq = *pcmdq; 194 + if (cmdq) { 195 + kfree(*pcmdq); 196 + *pcmdq = NULL; 197 + } 198 + } 199 + 200 + int 201 + nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, 202 + struct nvkm_falcon_cmdq **pcmdq) 203 + { 204 + struct nvkm_falcon_cmdq *cmdq = *pcmdq; 205 + 206 + if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL))) 207 + return -ENOMEM; 208 + 209 + cmdq->qmgr = qmgr; 210 + cmdq->name = name; 211 + mutex_init(&cmdq->mutex); 212 + init_completion(&cmdq->ready); 213 + return 0; 214 + }
+213
drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
··· 1 + /* 2 + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #include "qmgr.h" 24 + 25 + static void 26 + nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq) 27 + { 28 + mutex_lock(&msgq->mutex); 29 + msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); 30 + } 31 + 32 + static void 33 + nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit) 34 + { 35 + struct nvkm_falcon *falcon = msgq->qmgr->falcon; 36 + 37 + if (commit) 38 + nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position); 39 + 40 + mutex_unlock(&msgq->mutex); 41 + } 42 + 43 + static bool 44 + nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq) 45 + { 46 + u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg); 47 + u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); 48 + return head == tail; 49 + } 50 + 51 + static int 52 + nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size) 53 + { 54 + struct nvkm_falcon *falcon = msgq->qmgr->falcon; 55 + u32 head, tail, available; 56 + 57 + head = nvkm_falcon_rd32(falcon, msgq->head_reg); 58 + /* has the buffer looped? 
*/ 59 + if (head < msgq->position) 60 + msgq->position = msgq->offset; 61 + 62 + tail = msgq->position; 63 + 64 + available = head - tail; 65 + if (size > available) { 66 + FLCNQ_ERR(msgq, "requested %d bytes, but only %d available", 67 + size, available); 68 + return -EINVAL; 69 + } 70 + 71 + nvkm_falcon_read_dmem(falcon, tail, size, 0, data); 72 + msgq->position += ALIGN(size, QUEUE_ALIGNMENT); 73 + return 0; 74 + } 75 + 76 + static int 77 + nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr) 78 + { 79 + int ret = 0; 80 + 81 + nvkm_falcon_msgq_open(msgq); 82 + 83 + if (nvkm_falcon_msgq_empty(msgq)) 84 + goto close; 85 + 86 + ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE); 87 + if (ret) { 88 + FLCNQ_ERR(msgq, "failed to read message header"); 89 + goto close; 90 + } 91 + 92 + if (hdr->size > MSG_BUF_SIZE) { 93 + FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size); 94 + ret = -ENOSPC; 95 + goto close; 96 + } 97 + 98 + if (hdr->size > HDR_SIZE) { 99 + u32 read_size = hdr->size - HDR_SIZE; 100 + 101 + ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size); 102 + if (ret) { 103 + FLCNQ_ERR(msgq, "failed to read message data"); 104 + goto close; 105 + } 106 + } 107 + 108 + ret = 1; 109 + close: 110 + nvkm_falcon_msgq_close(msgq, (ret >= 0)); 111 + return ret; 112 + } 113 + 114 + static int 115 + nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nv_falcon_msg *hdr) 116 + { 117 + struct nvkm_falcon_qmgr_seq *seq; 118 + 119 + seq = &msgq->qmgr->seq.id[hdr->seq_id]; 120 + if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) { 121 + FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id); 122 + return -EINVAL; 123 + } 124 + 125 + if (seq->state == SEQ_STATE_USED) { 126 + if (seq->callback) 127 + seq->result = seq->callback(seq->priv, hdr); 128 + } 129 + 130 + if (seq->async) { 131 + nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq); 132 + return 0; 133 + } 134 + 135 + complete_all(&seq->done); 136 + return 0; 
137 + } 138 + 139 + void 140 + nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq) 141 + { 142 + /* 143 + * We are invoked from a worker thread, so normally we have plenty of 144 + * stack space to work with. 145 + */ 146 + u8 msg_buffer[MSG_BUF_SIZE]; 147 + struct nv_falcon_msg *hdr = (void *)msg_buffer; 148 + 149 + while (nvkm_falcon_msgq_read(msgq, hdr) > 0) 150 + nvkm_falcon_msgq_exec(msgq, hdr); 151 + } 152 + 153 + int 154 + nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq, 155 + void *data, u32 size) 156 + { 157 + struct nvkm_falcon *falcon = msgq->qmgr->falcon; 158 + struct nv_falcon_msg *hdr = data; 159 + int ret; 160 + 161 + msgq->head_reg = falcon->func->msgq.head; 162 + msgq->tail_reg = falcon->func->msgq.tail; 163 + msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail); 164 + 165 + nvkm_falcon_msgq_open(msgq); 166 + ret = nvkm_falcon_msgq_pop(msgq, data, size); 167 + if (ret == 0 && hdr->size != size) { 168 + FLCN_ERR(falcon, "unexpected init message size %d vs %d", 169 + hdr->size, size); 170 + ret = -EINVAL; 171 + } 172 + nvkm_falcon_msgq_close(msgq, ret == 0); 173 + return ret; 174 + } 175 + 176 + void 177 + nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq, 178 + u32 index, u32 offset, u32 size) 179 + { 180 + const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func; 181 + 182 + msgq->head_reg = func->msgq.head + index * func->msgq.stride; 183 + msgq->tail_reg = func->msgq.tail + index * func->msgq.stride; 184 + msgq->offset = offset; 185 + 186 + FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x", 187 + index, msgq->offset, size); 188 + } 189 + 190 + void 191 + nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq) 192 + { 193 + struct nvkm_falcon_msgq *msgq = *pmsgq; 194 + if (msgq) { 195 + kfree(*pmsgq); 196 + *pmsgq = NULL; 197 + } 198 + } 199 + 200 + int 201 + nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, 202 + struct nvkm_falcon_msgq **pmsgq) 203 + { 204 + struct 
nvkm_falcon_msgq *msgq = *pmsgq; 205 + 206 + if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL))) 207 + return -ENOMEM; 208 + 209 + msgq->qmgr = qmgr; 210 + msgq->name = name; 211 + mutex_init(&msgq->mutex); 212 + return 0; 213 + }
-577
drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 
21 - * 22 - */ 23 - 24 - #include "msgqueue.h" 25 - #include <engine/falcon.h> 26 - 27 - #include <subdev/secboot.h> 28 - 29 - 30 - #define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr) 31 - #define QUEUE_ALIGNMENT 4 32 - /* max size of the messages we can receive */ 33 - #define MSG_BUF_SIZE 128 34 - 35 - static int 36 - msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) 37 - { 38 - struct nvkm_falcon *falcon = priv->falcon; 39 - 40 - mutex_lock(&queue->mutex); 41 - 42 - queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg); 43 - 44 - return 0; 45 - } 46 - 47 - static void 48 - msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, 49 - bool commit) 50 - { 51 - struct nvkm_falcon *falcon = priv->falcon; 52 - 53 - if (commit) 54 - nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position); 55 - 56 - mutex_unlock(&queue->mutex); 57 - } 58 - 59 - static bool 60 - msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) 61 - { 62 - struct nvkm_falcon *falcon = priv->falcon; 63 - u32 head, tail; 64 - 65 - head = nvkm_falcon_rd32(falcon, queue->head_reg); 66 - tail = nvkm_falcon_rd32(falcon, queue->tail_reg); 67 - 68 - return head == tail; 69 - } 70 - 71 - static int 72 - msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, 73 - void *data, u32 size) 74 - { 75 - struct nvkm_falcon *falcon = priv->falcon; 76 - const struct nvkm_subdev *subdev = priv->falcon->owner; 77 - u32 head, tail, available; 78 - 79 - head = nvkm_falcon_rd32(falcon, queue->head_reg); 80 - /* has the buffer looped? 
*/ 81 - if (head < queue->position) 82 - queue->position = queue->offset; 83 - 84 - tail = queue->position; 85 - 86 - available = head - tail; 87 - 88 - if (available == 0) { 89 - nvkm_warn(subdev, "no message data available\n"); 90 - return 0; 91 - } 92 - 93 - if (size > available) { 94 - nvkm_warn(subdev, "message data smaller than read request\n"); 95 - size = available; 96 - } 97 - 98 - nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data); 99 - queue->position += ALIGN(size, QUEUE_ALIGNMENT); 100 - 101 - return size; 102 - } 103 - 104 - static int 105 - msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, 106 - struct nvkm_msgqueue_hdr *hdr) 107 - { 108 - const struct nvkm_subdev *subdev = priv->falcon->owner; 109 - int err; 110 - 111 - err = msg_queue_open(priv, queue); 112 - if (err) { 113 - nvkm_error(subdev, "fail to open queue %d\n", queue->index); 114 - return err; 115 - } 116 - 117 - if (msg_queue_empty(priv, queue)) { 118 - err = 0; 119 - goto close; 120 - } 121 - 122 - err = msg_queue_pop(priv, queue, hdr, HDR_SIZE); 123 - if (err >= 0 && err != HDR_SIZE) 124 - err = -EINVAL; 125 - if (err < 0) { 126 - nvkm_error(subdev, "failed to read message header: %d\n", err); 127 - goto close; 128 - } 129 - 130 - if (hdr->size > MSG_BUF_SIZE) { 131 - nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size); 132 - err = -ENOSPC; 133 - goto close; 134 - } 135 - 136 - if (hdr->size > HDR_SIZE) { 137 - u32 read_size = hdr->size - HDR_SIZE; 138 - 139 - err = msg_queue_pop(priv, queue, (hdr + 1), read_size); 140 - if (err >= 0 && err != read_size) 141 - err = -EINVAL; 142 - if (err < 0) { 143 - nvkm_error(subdev, "failed to read message: %d\n", err); 144 - goto close; 145 - } 146 - } 147 - 148 - close: 149 - msg_queue_close(priv, queue, (err >= 0)); 150 - 151 - return err; 152 - } 153 - 154 - static bool 155 - cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, 156 - u32 size, bool *rewind) 157 - { 158 - 
struct nvkm_falcon *falcon = priv->falcon; 159 - u32 head, tail, free; 160 - 161 - size = ALIGN(size, QUEUE_ALIGNMENT); 162 - 163 - head = nvkm_falcon_rd32(falcon, queue->head_reg); 164 - tail = nvkm_falcon_rd32(falcon, queue->tail_reg); 165 - 166 - if (head >= tail) { 167 - free = queue->offset + queue->size - head; 168 - free -= HDR_SIZE; 169 - 170 - if (size > free) { 171 - *rewind = true; 172 - head = queue->offset; 173 - } 174 - } 175 - 176 - if (head < tail) 177 - free = tail - head - 1; 178 - 179 - return size <= free; 180 - } 181 - 182 - static int 183 - cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, 184 - void *data, u32 size) 185 - { 186 - nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0); 187 - queue->position += ALIGN(size, QUEUE_ALIGNMENT); 188 - 189 - return 0; 190 - } 191 - 192 - /* REWIND unit is always 0x00 */ 193 - #define MSGQUEUE_UNIT_REWIND 0x00 194 - 195 - static void 196 - cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue) 197 - { 198 - const struct nvkm_subdev *subdev = priv->falcon->owner; 199 - struct nvkm_msgqueue_hdr cmd; 200 - int err; 201 - 202 - cmd.unit_id = MSGQUEUE_UNIT_REWIND; 203 - cmd.size = sizeof(cmd); 204 - err = cmd_queue_push(priv, queue, &cmd, cmd.size); 205 - if (err) 206 - nvkm_error(subdev, "queue %d rewind failed\n", queue->index); 207 - else 208 - nvkm_error(subdev, "queue %d rewinded\n", queue->index); 209 - 210 - queue->position = queue->offset; 211 - } 212 - 213 - static int 214 - cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, 215 - u32 size) 216 - { 217 - struct nvkm_falcon *falcon = priv->falcon; 218 - const struct nvkm_subdev *subdev = priv->falcon->owner; 219 - bool rewind = false; 220 - 221 - mutex_lock(&queue->mutex); 222 - 223 - if (!cmd_queue_has_room(priv, queue, size, &rewind)) { 224 - nvkm_error(subdev, "queue full\n"); 225 - mutex_unlock(&queue->mutex); 226 - return -EAGAIN; 227 - } 228 - 229 - 
queue->position = nvkm_falcon_rd32(falcon, queue->head_reg); 230 - 231 - if (rewind) 232 - cmd_queue_rewind(priv, queue); 233 - 234 - return 0; 235 - } 236 - 237 - static void 238 - cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue, 239 - bool commit) 240 - { 241 - struct nvkm_falcon *falcon = priv->falcon; 242 - 243 - if (commit) 244 - nvkm_falcon_wr32(falcon, queue->head_reg, queue->position); 245 - 246 - mutex_unlock(&queue->mutex); 247 - } 248 - 249 - static int 250 - cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd, 251 - struct nvkm_msgqueue_queue *queue) 252 - { 253 - const struct nvkm_subdev *subdev = priv->falcon->owner; 254 - static unsigned timeout = 2000; 255 - unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout); 256 - int ret = -EAGAIN; 257 - bool commit = true; 258 - 259 - while (ret == -EAGAIN && time_before(jiffies, end_jiffies)) 260 - ret = cmd_queue_open(priv, queue, cmd->size); 261 - if (ret) { 262 - nvkm_error(subdev, "pmu_queue_open_write failed\n"); 263 - return ret; 264 - } 265 - 266 - ret = cmd_queue_push(priv, queue, cmd, cmd->size); 267 - if (ret) { 268 - nvkm_error(subdev, "pmu_queue_push failed\n"); 269 - commit = false; 270 - } 271 - 272 - cmd_queue_close(priv, queue, commit); 273 - 274 - return ret; 275 - } 276 - 277 - static struct nvkm_msgqueue_seq * 278 - msgqueue_seq_acquire(struct nvkm_msgqueue *priv) 279 - { 280 - const struct nvkm_subdev *subdev = priv->falcon->owner; 281 - struct nvkm_msgqueue_seq *seq; 282 - u32 index; 283 - 284 - mutex_lock(&priv->seq_lock); 285 - 286 - index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES); 287 - 288 - if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) { 289 - nvkm_error(subdev, "no free sequence available\n"); 290 - mutex_unlock(&priv->seq_lock); 291 - return ERR_PTR(-EAGAIN); 292 - } 293 - 294 - set_bit(index, priv->seq_tbl); 295 - 296 - mutex_unlock(&priv->seq_lock); 297 - 298 - seq = &priv->seq[index]; 299 - seq->state = 
SEQ_STATE_PENDING; 300 - 301 - return seq; 302 - } 303 - 304 - static void 305 - msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq) 306 - { 307 - /* no need to acquire seq_lock since clear_bit is atomic */ 308 - seq->state = SEQ_STATE_FREE; 309 - seq->callback = NULL; 310 - seq->completion = NULL; 311 - clear_bit(seq->id, priv->seq_tbl); 312 - } 313 - 314 - /* specifies that we want to know the command status in the answer message */ 315 - #define CMD_FLAGS_STATUS BIT(0) 316 - /* specifies that we want an interrupt when the answer message is queued */ 317 - #define CMD_FLAGS_INTR BIT(1) 318 - 319 - int 320 - nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio, 321 - struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb, 322 - struct completion *completion, bool wait_init) 323 - { 324 - struct nvkm_msgqueue_seq *seq; 325 - struct nvkm_msgqueue_queue *queue; 326 - int ret; 327 - 328 - if (wait_init && !wait_for_completion_timeout(&priv->init_done, 329 - msecs_to_jiffies(1000))) 330 - return -ETIMEDOUT; 331 - 332 - queue = priv->func->cmd_queue(priv, prio); 333 - if (IS_ERR(queue)) 334 - return PTR_ERR(queue); 335 - 336 - seq = msgqueue_seq_acquire(priv); 337 - if (IS_ERR(seq)) 338 - return PTR_ERR(seq); 339 - 340 - cmd->seq_id = seq->id; 341 - cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR; 342 - 343 - seq->callback = cb; 344 - seq->state = SEQ_STATE_USED; 345 - seq->completion = completion; 346 - 347 - ret = cmd_write(priv, cmd, queue); 348 - if (ret) { 349 - seq->state = SEQ_STATE_PENDING; 350 - msgqueue_seq_release(priv, seq); 351 - } 352 - 353 - return ret; 354 - } 355 - 356 - static int 357 - msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr) 358 - { 359 - const struct nvkm_subdev *subdev = priv->falcon->owner; 360 - struct nvkm_msgqueue_seq *seq; 361 - 362 - seq = &priv->seq[hdr->seq_id]; 363 - if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) { 364 
- nvkm_error(subdev, "msg for unknown sequence %d", seq->id); 365 - return -EINVAL; 366 - } 367 - 368 - if (seq->state == SEQ_STATE_USED) { 369 - if (seq->callback) 370 - seq->callback(priv, hdr); 371 - } 372 - 373 - if (seq->completion) 374 - complete(seq->completion); 375 - 376 - msgqueue_seq_release(priv, seq); 377 - 378 - return 0; 379 - } 380 - 381 - static int 382 - msgqueue_handle_init_msg(struct nvkm_msgqueue *priv, 383 - struct nvkm_msgqueue_hdr *hdr) 384 - { 385 - struct nvkm_falcon *falcon = priv->falcon; 386 - const struct nvkm_subdev *subdev = falcon->owner; 387 - u32 tail; 388 - u32 tail_reg; 389 - int ret; 390 - 391 - /* 392 - * Of course the message queue registers vary depending on the falcon 393 - * used... 394 - */ 395 - switch (falcon->owner->index) { 396 - case NVKM_SUBDEV_PMU: 397 - tail_reg = 0x4cc; 398 - break; 399 - case NVKM_ENGINE_SEC2: 400 - tail_reg = 0xa34; 401 - break; 402 - default: 403 - nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n", 404 - nvkm_subdev_name[falcon->owner->index]); 405 - return -EINVAL; 406 - } 407 - 408 - /* 409 - * Read the message - queues are not initialized yet so we cannot rely 410 - * on msg_queue_read() 411 - */ 412 - tail = nvkm_falcon_rd32(falcon, tail_reg); 413 - nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr); 414 - 415 - if (hdr->size > MSG_BUF_SIZE) { 416 - nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size); 417 - return -ENOSPC; 418 - } 419 - 420 - nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0, 421 - (hdr + 1)); 422 - 423 - tail += ALIGN(hdr->size, QUEUE_ALIGNMENT); 424 - nvkm_falcon_wr32(falcon, tail_reg, tail); 425 - 426 - ret = priv->func->init_func->init_callback(priv, hdr); 427 - if (ret) 428 - return ret; 429 - 430 - return 0; 431 - } 432 - 433 - void 434 - nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv, 435 - struct nvkm_msgqueue_queue *queue) 436 - { 437 - /* 438 - * We are invoked from a worker thread, so normally we have plenty of 
439 - * stack space to work with. 440 - */ 441 - u8 msg_buffer[MSG_BUF_SIZE]; 442 - struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer; 443 - int ret; 444 - 445 - /* the first message we receive must be the init message */ 446 - if ((!priv->init_msg_received)) { 447 - ret = msgqueue_handle_init_msg(priv, hdr); 448 - if (!ret) 449 - priv->init_msg_received = true; 450 - } else { 451 - while (msg_queue_read(priv, queue, hdr) > 0) 452 - msgqueue_msg_handle(priv, hdr); 453 - } 454 - } 455 - 456 - void 457 - nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf) 458 - { 459 - if (!queue || !queue->func || !queue->func->init_func) 460 - return; 461 - 462 - queue->func->init_func->gen_cmdline(queue, buf); 463 - } 464 - 465 - int 466 - nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue, 467 - unsigned long falcon_mask) 468 - { 469 - unsigned long falcon; 470 - 471 - if (!queue || !queue->func->acr_func) 472 - return -ENODEV; 473 - 474 - /* Does the firmware support booting multiple falcons? 
*/ 475 - if (queue->func->acr_func->boot_multiple_falcons) 476 - return queue->func->acr_func->boot_multiple_falcons(queue, 477 - falcon_mask); 478 - 479 - /* Else boot all requested falcons individually */ 480 - if (!queue->func->acr_func->boot_falcon) 481 - return -ENODEV; 482 - 483 - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { 484 - int ret = queue->func->acr_func->boot_falcon(queue, falcon); 485 - 486 - if (ret) 487 - return ret; 488 - } 489 - 490 - return 0; 491 - } 492 - 493 - int 494 - nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, 495 - const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue) 496 - { 497 - const struct nvkm_subdev *subdev = falcon->owner; 498 - int ret = -EINVAL; 499 - 500 - switch (version) { 501 - case 0x0137c63d: 502 - ret = msgqueue_0137c63d_new(falcon, sb, queue); 503 - break; 504 - case 0x0137bca5: 505 - ret = msgqueue_0137bca5_new(falcon, sb, queue); 506 - break; 507 - case 0x0148cdec: 508 - case 0x015ccf3e: 509 - case 0x0167d263: 510 - ret = msgqueue_0148cdec_new(falcon, sb, queue); 511 - break; 512 - default: 513 - nvkm_error(subdev, "unhandled firmware version 0x%08x\n", 514 - version); 515 - break; 516 - } 517 - 518 - if (ret == 0) { 519 - nvkm_debug(subdev, "firmware version: 0x%08x\n", version); 520 - (*queue)->fw_version = version; 521 - } 522 - 523 - return ret; 524 - } 525 - 526 - void 527 - nvkm_msgqueue_del(struct nvkm_msgqueue **queue) 528 - { 529 - if (*queue) { 530 - (*queue)->func->dtor(*queue); 531 - *queue = NULL; 532 - } 533 - } 534 - 535 - void 536 - nvkm_msgqueue_recv(struct nvkm_msgqueue *queue) 537 - { 538 - if (!queue->func || !queue->func->recv) { 539 - const struct nvkm_subdev *subdev = queue->falcon->owner; 540 - 541 - nvkm_warn(subdev, "missing msgqueue recv function\n"); 542 - return; 543 - } 544 - 545 - queue->func->recv(queue); 546 - } 547 - 548 - int 549 - nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue) 550 - { 551 - /* firmware not set yet... 
*/ 552 - if (!queue) 553 - return 0; 554 - 555 - queue->init_msg_received = false; 556 - reinit_completion(&queue->init_done); 557 - 558 - return 0; 559 - } 560 - 561 - void 562 - nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func, 563 - struct nvkm_falcon *falcon, 564 - struct nvkm_msgqueue *queue) 565 - { 566 - int i; 567 - 568 - queue->func = func; 569 - queue->falcon = falcon; 570 - mutex_init(&queue->seq_lock); 571 - for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++) 572 - queue->seq[i].id = i; 573 - 574 - init_completion(&queue->init_done); 575 - 576 - 577 - }
-213
drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - * 22 - */ 23 - 24 - #ifndef __NVKM_CORE_FALCON_MSGQUEUE_H 25 - #define __NVKM_CORE_FALCON_MSGQUEUE_H 26 - 27 - #include <core/msgqueue.h> 28 - 29 - /* 30 - * The struct nvkm_msgqueue (named so for lack of better candidate) manages 31 - * a firmware (typically, NVIDIA signed firmware) running under a given falcon. 32 - * 33 - * Such firmwares expect to receive commands (through one or several command 34 - * queues) and will reply to such command by sending messages (using one 35 - * message queue). 36 - * 37 - * Each firmware can support one or several units - ACR for managing secure 38 - * falcons, PMU for power management, etc. A unit can be seen as a class to 39 - * which command can be sent. 
40 - * 41 - * One usage example would be to send a command to the SEC falcon to ask it to 42 - * reset a secure falcon. The SEC falcon will receive the command, process it, 43 - * and send a message to signal success or failure. Only when the corresponding 44 - * message is received can the requester assume the request has been processed. 45 - * 46 - * Since we expect many variations between the firmwares NVIDIA will release 47 - * across GPU generations, this library is built in a very modular way. Message 48 - * formats and queues details (such as number of usage) are left to 49 - * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c 50 - * take care of posting commands and processing messages in a fashion that is 51 - * universal. 52 - * 53 - */ 54 - 55 - enum msgqueue_msg_priority { 56 - MSGQUEUE_MSG_PRIORITY_HIGH, 57 - MSGQUEUE_MSG_PRIORITY_LOW, 58 - }; 59 - 60 - /** 61 - * struct nvkm_msgqueue_hdr - header for all commands/messages 62 - * @unit_id: id of firmware using receiving the command/sending the message 63 - * @size: total size of command/message 64 - * @ctrl_flags: type of command/message 65 - * @seq_id: used to match a message from its corresponding command 66 - */ 67 - struct nvkm_msgqueue_hdr { 68 - u8 unit_id; 69 - u8 size; 70 - u8 ctrl_flags; 71 - u8 seq_id; 72 - }; 73 - 74 - /** 75 - * struct nvkm_msgqueue_msg - base message. 76 - * 77 - * This is just a header and a message (or command) type. Useful when 78 - * building command-specific structures. 
79 - */ 80 - struct nvkm_msgqueue_msg { 81 - struct nvkm_msgqueue_hdr hdr; 82 - u8 msg_type; 83 - }; 84 - 85 - struct nvkm_msgqueue; 86 - typedef void 87 - (*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *); 88 - 89 - /** 90 - * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization 91 - * 92 - * @gen_cmdline: build the commandline into a pre-allocated buffer 93 - * @init_callback: called to process the init message 94 - */ 95 - struct nvkm_msgqueue_init_func { 96 - void (*gen_cmdline)(struct nvkm_msgqueue *, void *); 97 - int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *); 98 - }; 99 - 100 - /** 101 - * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR 102 - * 103 - * @boot_falcon: build and send the command to reset a given falcon 104 - * @boot_multiple_falcons: build and send the command to reset several falcons 105 - */ 106 - struct nvkm_msgqueue_acr_func { 107 - int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon); 108 - int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long); 109 - }; 110 - 111 - struct nvkm_msgqueue_func { 112 - const struct nvkm_msgqueue_init_func *init_func; 113 - const struct nvkm_msgqueue_acr_func *acr_func; 114 - void (*dtor)(struct nvkm_msgqueue *); 115 - struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *, 116 - enum msgqueue_msg_priority); 117 - void (*recv)(struct nvkm_msgqueue *queue); 118 - }; 119 - 120 - /** 121 - * struct nvkm_msgqueue_queue - information about a command or message queue 122 - * 123 - * The number of queues is firmware-dependent. All queues must have their 124 - * information filled by the init message handler. 
125 - * 126 - * @mutex_lock: to be acquired when the queue is being used 127 - * @index: physical queue index 128 - * @offset: DMEM offset where this queue begins 129 - * @size: size allocated to this queue in DMEM (in bytes) 130 - * @position: current write position 131 - * @head_reg: address of the HEAD register for this queue 132 - * @tail_reg: address of the TAIL register for this queue 133 - */ 134 - struct nvkm_msgqueue_queue { 135 - struct mutex mutex; 136 - u32 index; 137 - u32 offset; 138 - u32 size; 139 - u32 position; 140 - 141 - u32 head_reg; 142 - u32 tail_reg; 143 - }; 144 - 145 - /** 146 - * struct nvkm_msgqueue_seq - keep track of ongoing commands 147 - * 148 - * Every time a command is sent, a sequence is assigned to it so the 149 - * corresponding message can be matched. Upon receiving the message, a callback 150 - * can be called and/or a completion signaled. 151 - * 152 - * @id: sequence ID 153 - * @state: current state 154 - * @callback: callback to call upon receiving matching message 155 - * @completion: completion to signal after callback is called 156 - */ 157 - struct nvkm_msgqueue_seq { 158 - u16 id; 159 - enum { 160 - SEQ_STATE_FREE = 0, 161 - SEQ_STATE_PENDING, 162 - SEQ_STATE_USED, 163 - SEQ_STATE_CANCELLED 164 - } state; 165 - nvkm_msgqueue_callback callback; 166 - struct completion *completion; 167 - }; 168 - 169 - /* 170 - * We can have an arbitrary number of sequences, but realistically we will 171 - * probably not use that much simultaneously. 
172 - */ 173 - #define NVKM_MSGQUEUE_NUM_SEQUENCES 16 174 - 175 - /** 176 - * struct nvkm_msgqueue - manage a command/message based FW on a falcon 177 - * 178 - * @falcon: falcon to be managed 179 - * @func: implementation of the firmware to use 180 - * @init_msg_received: whether the init message has already been received 181 - * @init_done: whether all init is complete and commands can be processed 182 - * @seq_lock: protects seq and seq_tbl 183 - * @seq: sequences to match commands and messages 184 - * @seq_tbl: bitmap of sequences currently in use 185 - */ 186 - struct nvkm_msgqueue { 187 - struct nvkm_falcon *falcon; 188 - const struct nvkm_msgqueue_func *func; 189 - u32 fw_version; 190 - bool init_msg_received; 191 - struct completion init_done; 192 - 193 - struct mutex seq_lock; 194 - struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES]; 195 - unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)]; 196 - }; 197 - 198 - void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *, 199 - struct nvkm_msgqueue *); 200 - int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority, 201 - struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback, 202 - struct completion *, bool); 203 - void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *, 204 - struct nvkm_msgqueue_queue *); 205 - 206 - int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *, 207 - struct nvkm_msgqueue **); 208 - int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *, 209 - struct nvkm_msgqueue **); 210 - int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *, 211 - struct nvkm_msgqueue **); 212 - 213 - #endif
-436
drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 
21 - * 22 - */ 23 - #include "msgqueue.h" 24 - #include <engine/falcon.h> 25 - #include <subdev/secboot.h> 26 - 27 - /* Queues identifiers */ 28 - enum { 29 - /* High Priority Command Queue for Host -> PMU communication */ 30 - MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0, 31 - /* Low Priority Command Queue for Host -> PMU communication */ 32 - MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1, 33 - /* Message queue for PMU -> Host communication */ 34 - MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4, 35 - MSGQUEUE_0137C63D_NUM_QUEUES = 5, 36 - }; 37 - 38 - struct msgqueue_0137c63d { 39 - struct nvkm_msgqueue base; 40 - 41 - struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES]; 42 - }; 43 - #define msgqueue_0137c63d(q) \ 44 - container_of(q, struct msgqueue_0137c63d, base) 45 - 46 - struct msgqueue_0137bca5 { 47 - struct msgqueue_0137c63d base; 48 - 49 - u64 wpr_addr; 50 - }; 51 - #define msgqueue_0137bca5(q) \ 52 - container_of(container_of(q, struct msgqueue_0137c63d, base), \ 53 - struct msgqueue_0137bca5, base); 54 - 55 - static struct nvkm_msgqueue_queue * 56 - msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue, 57 - enum msgqueue_msg_priority priority) 58 - { 59 - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue); 60 - const struct nvkm_subdev *subdev = priv->base.falcon->owner; 61 - 62 - switch (priority) { 63 - case MSGQUEUE_MSG_PRIORITY_HIGH: 64 - return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ]; 65 - case MSGQUEUE_MSG_PRIORITY_LOW: 66 - return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ]; 67 - default: 68 - nvkm_error(subdev, "invalid command queue!\n"); 69 - return ERR_PTR(-EINVAL); 70 - } 71 - } 72 - 73 - static void 74 - msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue) 75 - { 76 - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue); 77 - struct nvkm_msgqueue_queue *q_queue = 78 - &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE]; 79 - 80 - nvkm_msgqueue_process_msgs(&priv->base, q_queue); 81 - } 82 - 83 - /* Init unit */ 84 - 
#define MSGQUEUE_0137C63D_UNIT_INIT 0x07 85 - 86 - enum { 87 - INIT_MSG_INIT = 0x0, 88 - }; 89 - 90 - static void 91 - init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf) 92 - { 93 - struct { 94 - u32 reserved; 95 - u32 freq_hz; 96 - u32 trace_size; 97 - u32 trace_dma_base; 98 - u16 trace_dma_base1; 99 - u8 trace_dma_offset; 100 - u32 trace_dma_idx; 101 - bool secure_mode; 102 - bool raise_priv_sec; 103 - struct { 104 - u32 dma_base; 105 - u16 dma_base1; 106 - u8 dma_offset; 107 - u16 fb_size; 108 - u8 dma_idx; 109 - } gc6_ctx; 110 - u8 pad; 111 - } *args = buf; 112 - 113 - args->secure_mode = 1; 114 - } 115 - 116 - /* forward declaration */ 117 - static int acr_init_wpr(struct nvkm_msgqueue *queue); 118 - 119 - static int 120 - init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr) 121 - { 122 - struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue); 123 - struct { 124 - struct nvkm_msgqueue_msg base; 125 - 126 - u8 pad; 127 - u16 os_debug_entry_point; 128 - 129 - struct { 130 - u16 size; 131 - u16 offset; 132 - u8 index; 133 - u8 pad; 134 - } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES]; 135 - 136 - u16 sw_managed_area_offset; 137 - u16 sw_managed_area_size; 138 - } *init = (void *)hdr; 139 - const struct nvkm_subdev *subdev = _queue->falcon->owner; 140 - int i; 141 - 142 - if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) { 143 - nvkm_error(subdev, "expected message from init unit\n"); 144 - return -EINVAL; 145 - } 146 - 147 - if (init->base.msg_type != INIT_MSG_INIT) { 148 - nvkm_error(subdev, "expected PMU init msg\n"); 149 - return -EINVAL; 150 - } 151 - 152 - for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) { 153 - struct nvkm_msgqueue_queue *queue = &priv->queue[i]; 154 - 155 - mutex_init(&queue->mutex); 156 - 157 - queue->index = init->queue_info[i].index; 158 - queue->offset = init->queue_info[i].offset; 159 - queue->size = init->queue_info[i].size; 160 - 161 - if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) { 162 - 
queue->head_reg = 0x4a0 + (queue->index * 4); 163 - queue->tail_reg = 0x4b0 + (queue->index * 4); 164 - } else { 165 - queue->head_reg = 0x4c8; 166 - queue->tail_reg = 0x4cc; 167 - } 168 - 169 - nvkm_debug(subdev, 170 - "queue %d: index %d, offset 0x%08x, size 0x%08x\n", 171 - i, queue->index, queue->offset, queue->size); 172 - } 173 - 174 - /* Complete initialization by initializing WPR region */ 175 - return acr_init_wpr(&priv->base); 176 - } 177 - 178 - static const struct nvkm_msgqueue_init_func 179 - msgqueue_0137c63d_init_func = { 180 - .gen_cmdline = init_gen_cmdline, 181 - .init_callback = init_callback, 182 - }; 183 - 184 - 185 - 186 - /* ACR unit */ 187 - #define MSGQUEUE_0137C63D_UNIT_ACR 0x0a 188 - 189 - enum { 190 - ACR_CMD_INIT_WPR_REGION = 0x00, 191 - ACR_CMD_BOOTSTRAP_FALCON = 0x01, 192 - ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03, 193 - }; 194 - 195 - static void 196 - acr_init_wpr_callback(struct nvkm_msgqueue *queue, 197 - struct nvkm_msgqueue_hdr *hdr) 198 - { 199 - struct { 200 - struct nvkm_msgqueue_msg base; 201 - u32 error_code; 202 - } *msg = (void *)hdr; 203 - const struct nvkm_subdev *subdev = queue->falcon->owner; 204 - 205 - if (msg->error_code) { 206 - nvkm_error(subdev, "ACR WPR init failure: %d\n", 207 - msg->error_code); 208 - return; 209 - } 210 - 211 - nvkm_debug(subdev, "ACR WPR init complete\n"); 212 - complete_all(&queue->init_done); 213 - } 214 - 215 - static int 216 - acr_init_wpr(struct nvkm_msgqueue *queue) 217 - { 218 - /* 219 - * region_id: region ID in WPR region 220 - * wpr_offset: offset in WPR region 221 - */ 222 - struct { 223 - struct nvkm_msgqueue_hdr hdr; 224 - u8 cmd_type; 225 - u32 region_id; 226 - u32 wpr_offset; 227 - } cmd; 228 - memset(&cmd, 0, sizeof(cmd)); 229 - 230 - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; 231 - cmd.hdr.size = sizeof(cmd); 232 - cmd.cmd_type = ACR_CMD_INIT_WPR_REGION; 233 - cmd.region_id = 0x01; 234 - cmd.wpr_offset = 0x00; 235 - 236 - nvkm_msgqueue_post(queue, 
MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, 237 - acr_init_wpr_callback, NULL, false); 238 - 239 - return 0; 240 - } 241 - 242 - 243 - static void 244 - acr_boot_falcon_callback(struct nvkm_msgqueue *priv, 245 - struct nvkm_msgqueue_hdr *hdr) 246 - { 247 - struct acr_bootstrap_falcon_msg { 248 - struct nvkm_msgqueue_msg base; 249 - 250 - u32 falcon_id; 251 - } *msg = (void *)hdr; 252 - const struct nvkm_subdev *subdev = priv->falcon->owner; 253 - u32 falcon_id = msg->falcon_id; 254 - 255 - if (falcon_id >= NVKM_SECBOOT_FALCON_END) { 256 - nvkm_error(subdev, "in bootstrap falcon callback:\n"); 257 - nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id); 258 - return; 259 - } 260 - nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]); 261 - } 262 - 263 - enum { 264 - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0, 265 - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1, 266 - }; 267 - 268 - static int 269 - acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon) 270 - { 271 - DECLARE_COMPLETION_ONSTACK(completed); 272 - /* 273 - * flags - Flag specifying RESET or no RESET. 274 - * falcon id - Falcon id specifying falcon to bootstrap. 
275 - */ 276 - struct { 277 - struct nvkm_msgqueue_hdr hdr; 278 - u8 cmd_type; 279 - u32 flags; 280 - u32 falcon_id; 281 - } cmd; 282 - 283 - memset(&cmd, 0, sizeof(cmd)); 284 - 285 - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; 286 - cmd.hdr.size = sizeof(cmd); 287 - cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON; 288 - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; 289 - cmd.falcon_id = falcon; 290 - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, 291 - acr_boot_falcon_callback, &completed, true); 292 - 293 - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) 294 - return -ETIMEDOUT; 295 - 296 - return 0; 297 - } 298 - 299 - static void 300 - acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv, 301 - struct nvkm_msgqueue_hdr *hdr) 302 - { 303 - struct acr_bootstrap_falcon_msg { 304 - struct nvkm_msgqueue_msg base; 305 - 306 - u32 falcon_mask; 307 - } *msg = (void *)hdr; 308 - const struct nvkm_subdev *subdev = priv->falcon->owner; 309 - unsigned long falcon_mask = msg->falcon_mask; 310 - u32 falcon_id, falcon_treated = 0; 311 - 312 - for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) { 313 - nvkm_debug(subdev, "%s booted\n", 314 - nvkm_secboot_falcon_name[falcon_id]); 315 - falcon_treated |= BIT(falcon_id); 316 - } 317 - 318 - if (falcon_treated != msg->falcon_mask) { 319 - nvkm_error(subdev, "in bootstrap falcon callback:\n"); 320 - nvkm_error(subdev, "invalid falcon mask 0x%x\n", 321 - msg->falcon_mask); 322 - return; 323 - } 324 - } 325 - 326 - static int 327 - acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask) 328 - { 329 - DECLARE_COMPLETION_ONSTACK(completed); 330 - /* 331 - * flags - Flag specifying RESET or no RESET. 332 - * falcon id - Falcon id specifying falcon to bootstrap. 
333 - */ 334 - struct { 335 - struct nvkm_msgqueue_hdr hdr; 336 - u8 cmd_type; 337 - u32 flags; 338 - u32 falcon_mask; 339 - u32 use_va_mask; 340 - u32 wpr_lo; 341 - u32 wpr_hi; 342 - } cmd; 343 - struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv); 344 - 345 - memset(&cmd, 0, sizeof(cmd)); 346 - 347 - cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; 348 - cmd.hdr.size = sizeof(cmd); 349 - cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS; 350 - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; 351 - cmd.falcon_mask = falcon_mask; 352 - cmd.wpr_lo = lower_32_bits(queue->wpr_addr); 353 - cmd.wpr_hi = upper_32_bits(queue->wpr_addr); 354 - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, 355 - acr_boot_multiple_falcons_callback, &completed, true); 356 - 357 - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) 358 - return -ETIMEDOUT; 359 - 360 - return 0; 361 - } 362 - 363 - static const struct nvkm_msgqueue_acr_func 364 - msgqueue_0137c63d_acr_func = { 365 - .boot_falcon = acr_boot_falcon, 366 - }; 367 - 368 - static const struct nvkm_msgqueue_acr_func 369 - msgqueue_0137bca5_acr_func = { 370 - .boot_falcon = acr_boot_falcon, 371 - .boot_multiple_falcons = acr_boot_multiple_falcons, 372 - }; 373 - 374 - static void 375 - msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue) 376 - { 377 - kfree(msgqueue_0137c63d(queue)); 378 - } 379 - 380 - static const struct nvkm_msgqueue_func 381 - msgqueue_0137c63d_func = { 382 - .init_func = &msgqueue_0137c63d_init_func, 383 - .acr_func = &msgqueue_0137c63d_acr_func, 384 - .cmd_queue = msgqueue_0137c63d_cmd_queue, 385 - .recv = msgqueue_0137c63d_process_msgs, 386 - .dtor = msgqueue_0137c63d_dtor, 387 - }; 388 - 389 - int 390 - msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, 391 - struct nvkm_msgqueue **queue) 392 - { 393 - struct msgqueue_0137c63d *ret; 394 - 395 - ret = kzalloc(sizeof(*ret), GFP_KERNEL); 396 - if (!ret) 397 - return -ENOMEM; 398 - 399 - 
*queue = &ret->base; 400 - 401 - nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base); 402 - 403 - return 0; 404 - } 405 - 406 - static const struct nvkm_msgqueue_func 407 - msgqueue_0137bca5_func = { 408 - .init_func = &msgqueue_0137c63d_init_func, 409 - .acr_func = &msgqueue_0137bca5_acr_func, 410 - .cmd_queue = msgqueue_0137c63d_cmd_queue, 411 - .recv = msgqueue_0137c63d_process_msgs, 412 - .dtor = msgqueue_0137c63d_dtor, 413 - }; 414 - 415 - int 416 - msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, 417 - struct nvkm_msgqueue **queue) 418 - { 419 - struct msgqueue_0137bca5 *ret; 420 - 421 - ret = kzalloc(sizeof(*ret), GFP_KERNEL); 422 - if (!ret) 423 - return -ENOMEM; 424 - 425 - *queue = &ret->base.base; 426 - 427 - /* 428 - * FIXME this must be set to the address of a *GPU* mapping within the 429 - * ACR address space! 430 - */ 431 - /* ret->wpr_addr = sb->wpr_addr; */ 432 - 433 - nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base); 434 - 435 - return 0; 436 - }
-264
drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - * 22 - */ 23 - 24 - #include "msgqueue.h" 25 - #include <engine/falcon.h> 26 - #include <subdev/secboot.h> 27 - 28 - /* 29 - * This firmware runs on the SEC falcon. It only has one command and one 30 - * message queue, and uses a different command line and init message. 
31 - */ 32 - 33 - enum { 34 - MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0, 35 - MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1, 36 - MSGQUEUE_0148CDEC_NUM_QUEUES, 37 - }; 38 - 39 - struct msgqueue_0148cdec { 40 - struct nvkm_msgqueue base; 41 - 42 - struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES]; 43 - }; 44 - #define msgqueue_0148cdec(q) \ 45 - container_of(q, struct msgqueue_0148cdec, base) 46 - 47 - static struct nvkm_msgqueue_queue * 48 - msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue, 49 - enum msgqueue_msg_priority priority) 50 - { 51 - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue); 52 - 53 - return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE]; 54 - } 55 - 56 - static void 57 - msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue) 58 - { 59 - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue); 60 - struct nvkm_msgqueue_queue *q_queue = 61 - &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE]; 62 - 63 - nvkm_msgqueue_process_msgs(&priv->base, q_queue); 64 - } 65 - 66 - 67 - /* Init unit */ 68 - #define MSGQUEUE_0148CDEC_UNIT_INIT 0x01 69 - 70 - enum { 71 - INIT_MSG_INIT = 0x0, 72 - }; 73 - 74 - static void 75 - init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf) 76 - { 77 - struct { 78 - u32 freq_hz; 79 - u32 falc_trace_size; 80 - u32 falc_trace_dma_base; 81 - u32 falc_trace_dma_idx; 82 - bool secure_mode; 83 - } *args = buf; 84 - 85 - args->secure_mode = false; 86 - } 87 - 88 - static int 89 - init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr) 90 - { 91 - struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue); 92 - struct { 93 - struct nvkm_msgqueue_msg base; 94 - 95 - u8 num_queues; 96 - u16 os_debug_entry_point; 97 - 98 - struct { 99 - u32 offset; 100 - u16 size; 101 - u8 index; 102 - u8 id; 103 - } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES]; 104 - 105 - u16 sw_managed_area_offset; 106 - u16 sw_managed_area_size; 107 - } *init = (void *)hdr; 108 - const struct nvkm_subdev *subdev = 
_queue->falcon->owner; 109 - int i; 110 - 111 - if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) { 112 - nvkm_error(subdev, "expected message from init unit\n"); 113 - return -EINVAL; 114 - } 115 - 116 - if (init->base.msg_type != INIT_MSG_INIT) { 117 - nvkm_error(subdev, "expected SEC init msg\n"); 118 - return -EINVAL; 119 - } 120 - 121 - for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) { 122 - u8 id = init->queue_info[i].id; 123 - struct nvkm_msgqueue_queue *queue = &priv->queue[id]; 124 - 125 - mutex_init(&queue->mutex); 126 - 127 - queue->index = init->queue_info[i].index; 128 - queue->offset = init->queue_info[i].offset; 129 - queue->size = init->queue_info[i].size; 130 - 131 - if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) { 132 - queue->head_reg = 0xa30 + (queue->index * 8); 133 - queue->tail_reg = 0xa34 + (queue->index * 8); 134 - } else { 135 - queue->head_reg = 0xa00 + (queue->index * 8); 136 - queue->tail_reg = 0xa04 + (queue->index * 8); 137 - } 138 - 139 - nvkm_debug(subdev, 140 - "queue %d: index %d, offset 0x%08x, size 0x%08x\n", 141 - id, queue->index, queue->offset, queue->size); 142 - } 143 - 144 - complete_all(&_queue->init_done); 145 - 146 - return 0; 147 - } 148 - 149 - static const struct nvkm_msgqueue_init_func 150 - msgqueue_0148cdec_init_func = { 151 - .gen_cmdline = init_gen_cmdline, 152 - .init_callback = init_callback, 153 - }; 154 - 155 - 156 - 157 - /* ACR unit */ 158 - #define MSGQUEUE_0148CDEC_UNIT_ACR 0x08 159 - 160 - enum { 161 - ACR_CMD_BOOTSTRAP_FALCON = 0x00, 162 - }; 163 - 164 - static void 165 - acr_boot_falcon_callback(struct nvkm_msgqueue *priv, 166 - struct nvkm_msgqueue_hdr *hdr) 167 - { 168 - struct acr_bootstrap_falcon_msg { 169 - struct nvkm_msgqueue_msg base; 170 - 171 - u32 error_code; 172 - u32 falcon_id; 173 - } *msg = (void *)hdr; 174 - const struct nvkm_subdev *subdev = priv->falcon->owner; 175 - u32 falcon_id = msg->falcon_id; 176 - 177 - if (msg->error_code) { 178 - nvkm_error(subdev, "in bootstrap 
falcon callback:\n"); 179 - nvkm_error(subdev, "expected error code 0x%x\n", 180 - msg->error_code); 181 - return; 182 - } 183 - 184 - if (falcon_id >= NVKM_SECBOOT_FALCON_END) { 185 - nvkm_error(subdev, "in bootstrap falcon callback:\n"); 186 - nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id); 187 - return; 188 - } 189 - 190 - nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]); 191 - } 192 - 193 - enum { 194 - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0, 195 - ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1, 196 - }; 197 - 198 - static int 199 - acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon) 200 - { 201 - DECLARE_COMPLETION_ONSTACK(completed); 202 - /* 203 - * flags - Flag specifying RESET or no RESET. 204 - * falcon id - Falcon id specifying falcon to bootstrap. 205 - */ 206 - struct { 207 - struct nvkm_msgqueue_hdr hdr; 208 - u8 cmd_type; 209 - u32 flags; 210 - u32 falcon_id; 211 - } cmd; 212 - 213 - memset(&cmd, 0, sizeof(cmd)); 214 - 215 - cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR; 216 - cmd.hdr.size = sizeof(cmd); 217 - cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON; 218 - cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; 219 - cmd.falcon_id = falcon; 220 - nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, 221 - acr_boot_falcon_callback, &completed, true); 222 - 223 - if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) 224 - return -ETIMEDOUT; 225 - 226 - return 0; 227 - } 228 - 229 - const struct nvkm_msgqueue_acr_func 230 - msgqueue_0148cdec_acr_func = { 231 - .boot_falcon = acr_boot_falcon, 232 - }; 233 - 234 - static void 235 - msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue) 236 - { 237 - kfree(msgqueue_0148cdec(queue)); 238 - } 239 - 240 - const struct nvkm_msgqueue_func 241 - msgqueue_0148cdec_func = { 242 - .init_func = &msgqueue_0148cdec_init_func, 243 - .acr_func = &msgqueue_0148cdec_acr_func, 244 - .cmd_queue = msgqueue_0148cdec_cmd_queue, 245 - .recv = 
msgqueue_0148cdec_process_msgs, 246 - .dtor = msgqueue_0148cdec_dtor, 247 - }; 248 - 249 - int 250 - msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, 251 - struct nvkm_msgqueue **queue) 252 - { 253 - struct msgqueue_0148cdec *ret; 254 - 255 - ret = kzalloc(sizeof(*ret), GFP_KERNEL); 256 - if (!ret) 257 - return -ENOMEM; 258 - 259 - *queue = &ret->base; 260 - 261 - nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base); 262 - 263 - return 0; 264 - }
+1 -5
drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 2 #ifndef __NVKM_FALCON_PRIV_H__ 3 3 #define __NVKM_FALCON_PRIV_H__ 4 - #include <engine/falcon.h> 5 - 6 - void 7 - nvkm_falcon_ctor(const struct nvkm_falcon_func *, struct nvkm_subdev *, 8 - const char *, u32, struct nvkm_falcon *); 4 + #include <core/falcon.h> 9 5 #endif
+87
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
··· 1 + /* 2 + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #include "qmgr.h" 24 + 25 + struct nvkm_falcon_qmgr_seq * 26 + nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr) 27 + { 28 + const struct nvkm_subdev *subdev = qmgr->falcon->owner; 29 + struct nvkm_falcon_qmgr_seq *seq; 30 + u32 index; 31 + 32 + mutex_lock(&qmgr->seq.mutex); 33 + index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM); 34 + if (index >= NVKM_FALCON_QMGR_SEQ_NUM) { 35 + nvkm_error(subdev, "no free sequence available\n"); 36 + mutex_unlock(&qmgr->seq.mutex); 37 + return ERR_PTR(-EAGAIN); 38 + } 39 + 40 + set_bit(index, qmgr->seq.tbl); 41 + mutex_unlock(&qmgr->seq.mutex); 42 + 43 + seq = &qmgr->seq.id[index]; 44 + seq->state = SEQ_STATE_PENDING; 45 + return seq; 46 + } 47 + 48 + void 49 + nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr, 50 + struct nvkm_falcon_qmgr_seq *seq) 51 + { 52 + /* no need to acquire seq.mutex since clear_bit is atomic */ 53 + seq->state = SEQ_STATE_FREE; 54 + seq->callback = NULL; 55 + reinit_completion(&seq->done); 56 + clear_bit(seq->id, qmgr->seq.tbl); 57 + } 58 + 59 + void 60 + nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr) 61 + { 62 + struct nvkm_falcon_qmgr *qmgr = *pqmgr; 63 + if (qmgr) { 64 + kfree(*pqmgr); 65 + *pqmgr = NULL; 66 + } 67 + } 68 + 69 + int 70 + nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon, 71 + struct nvkm_falcon_qmgr **pqmgr) 72 + { 73 + struct nvkm_falcon_qmgr *qmgr; 74 + int i; 75 + 76 + if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL))) 77 + return -ENOMEM; 78 + 79 + qmgr->falcon = falcon; 80 + mutex_init(&qmgr->seq.mutex); 81 + for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) { 82 + qmgr->seq.id[i].id = i; 83 + init_completion(&qmgr->seq.id[i].done); 84 + } 85 + 86 + return 0; 87 + }
+89
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVKM_FALCON_QMGR_H__ 3 + #define __NVKM_FALCON_QMGR_H__ 4 + #include <core/falcon.h> 5 + 6 + #define HDR_SIZE sizeof(struct nv_falcon_msg) 7 + #define QUEUE_ALIGNMENT 4 8 + /* max size of the messages we can receive */ 9 + #define MSG_BUF_SIZE 128 10 + 11 + /** 12 + * struct nvkm_falcon_qmgr_seq - keep track of ongoing commands 13 + * 14 + * Every time a command is sent, a sequence is assigned to it so the 15 + * corresponding message can be matched. Upon receiving the message, a callback 16 + * can be called and/or a completion signaled. 17 + * 18 + * @id: sequence ID 19 + * @state: current state 20 + * @callback: callback to call upon receiving matching message 21 + * @completion: completion to signal after callback is called 22 + */ 23 + struct nvkm_falcon_qmgr_seq { 24 + u16 id; 25 + enum { 26 + SEQ_STATE_FREE = 0, 27 + SEQ_STATE_PENDING, 28 + SEQ_STATE_USED, 29 + SEQ_STATE_CANCELLED 30 + } state; 31 + bool async; 32 + nvkm_falcon_qmgr_callback callback; 33 + void *priv; 34 + struct completion done; 35 + int result; 36 + }; 37 + 38 + /* 39 + * We can have an arbitrary number of sequences, but realistically we will 40 + * probably not use that much simultaneously. 
41 + */ 42 + #define NVKM_FALCON_QMGR_SEQ_NUM 16 43 + 44 + struct nvkm_falcon_qmgr { 45 + struct nvkm_falcon *falcon; 46 + 47 + struct { 48 + struct mutex mutex; 49 + struct nvkm_falcon_qmgr_seq id[NVKM_FALCON_QMGR_SEQ_NUM]; 50 + unsigned long tbl[BITS_TO_LONGS(NVKM_FALCON_QMGR_SEQ_NUM)]; 51 + } seq; 52 + }; 53 + 54 + struct nvkm_falcon_qmgr_seq * 55 + nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *); 56 + void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *, 57 + struct nvkm_falcon_qmgr_seq *); 58 + 59 + struct nvkm_falcon_cmdq { 60 + struct nvkm_falcon_qmgr *qmgr; 61 + const char *name; 62 + struct mutex mutex; 63 + struct completion ready; 64 + 65 + u32 head_reg; 66 + u32 tail_reg; 67 + u32 offset; 68 + u32 size; 69 + 70 + u32 position; 71 + }; 72 + 73 + struct nvkm_falcon_msgq { 74 + struct nvkm_falcon_qmgr *qmgr; 75 + const char *name; 76 + struct mutex mutex; 77 + 78 + u32 head_reg; 79 + u32 tail_reg; 80 + u32 offset; 81 + 82 + u32 position; 83 + }; 84 + 85 + #define FLCNQ_PRINTK(t,q,f,a...) \ 86 + FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a) 87 + #define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a) 88 + #define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a) 89 + #endif
+18 -68
drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
··· 25 25 #include <core/memory.h> 26 26 #include <subdev/timer.h> 27 27 28 - static void 28 + void 29 29 nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, 30 30 u32 size, u16 tag, u8 port, bool secure) 31 31 { ··· 89 89 } 90 90 } 91 91 92 - static const u32 EMEM_START_ADDR = 0x1000000; 93 - 94 - static void 92 + void 95 93 nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, 96 - u32 size, u8 port) 94 + u32 size, u8 port) 97 95 { 96 + const struct nvkm_falcon_func *func = falcon->func; 98 97 u8 rem = size % 4; 99 98 int i; 100 99 101 - if (start >= EMEM_START_ADDR && falcon->has_emem) 100 + if (func->emem_addr && start >= func->emem_addr) 102 101 return nvkm_falcon_v1_load_emem(falcon, data, 103 - start - EMEM_START_ADDR, size, 102 + start - func->emem_addr, size, 104 103 port); 105 104 106 105 size -= rem; ··· 147 148 } 148 149 } 149 150 150 - static void 151 + void 151 152 nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, 152 153 u8 port, void *data) 153 154 { 155 + const struct nvkm_falcon_func *func = falcon->func; 154 156 u8 rem = size % 4; 155 157 int i; 156 158 157 - if (start >= EMEM_START_ADDR && falcon->has_emem) 158 - return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR, 159 + if (func->emem_addr && start >= func->emem_addr) 160 + return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr, 159 161 size, port, data); 160 162 161 163 size -= rem; ··· 179 179 } 180 180 } 181 181 182 - static void 182 + void 183 183 nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx) 184 184 { 185 - struct nvkm_device *device = falcon->owner->device; 185 + const u32 fbif = falcon->func->fbif; 186 186 u32 inst_loc; 187 - u32 fbif; 188 187 189 188 /* disable instance block binding */ 190 189 if (ctx == NULL) { 191 190 nvkm_falcon_wr32(falcon, 0x10c, 0x0); 192 191 return; 193 - } 194 - 195 - switch (falcon->owner->index) { 196 - case NVKM_ENGINE_NVENC0: 197 - 
case NVKM_ENGINE_NVENC1: 198 - case NVKM_ENGINE_NVENC2: 199 - fbif = 0x800; 200 - break; 201 - case NVKM_SUBDEV_PMU: 202 - fbif = 0xe00; 203 - break; 204 - default: 205 - fbif = 0x600; 206 - break; 207 192 } 208 193 209 194 nvkm_falcon_wr32(falcon, 0x10c, 0x1); ··· 219 234 220 235 nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000); 221 236 nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8); 222 - 223 - /* Not sure if this is a WAR for a HW issue, or some additional 224 - * programming sequence that's needed to properly complete the 225 - * context switch we trigger above. 226 - * 227 - * Fixes unreliability of booting the SEC2 RTOS on Quadro P620, 228 - * particularly when resuming from suspend. 229 - * 230 - * Also removes the need for an odd workaround where we needed 231 - * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before 232 - * the SEC2 RTOS would begin executing. 233 - */ 234 - switch (falcon->owner->index) { 235 - case NVKM_SUBDEV_GSP: 236 - case NVKM_ENGINE_SEC2: 237 - nvkm_msec(device, 10, 238 - u32 irqstat = nvkm_falcon_rd32(falcon, 0x008); 239 - u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); 240 - if ((irqstat & 0x00000008) && 241 - (flcn0dc & 0x00007000) == 0x00005000) 242 - break; 243 - ); 244 - 245 - nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008); 246 - nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002); 247 - 248 - nvkm_msec(device, 10, 249 - u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc); 250 - if ((flcn0dc & 0x00007000) == 0x00000000) 251 - break; 252 - ); 253 - break; 254 - default: 255 - break; 256 - } 257 237 } 258 238 259 - static void 239 + void 260 240 nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr) 261 241 { 262 242 nvkm_falcon_wr32(falcon, 0x104, start_addr); 263 243 } 264 244 265 - static void 245 + void 266 246 nvkm_falcon_v1_start(struct nvkm_falcon *falcon) 267 247 { 268 248 u32 reg = nvkm_falcon_rd32(falcon, 0x100); ··· 238 288 nvkm_falcon_wr32(falcon, 0x100, 0x2); 239 289 } 240 290 241 - static 
int 291 + int 242 292 nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) 243 293 { 244 294 struct nvkm_device *device = falcon->owner->device; ··· 251 301 return 0; 252 302 } 253 303 254 - static int 304 + int 255 305 nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) 256 306 { 257 307 struct nvkm_device *device = falcon->owner->device; ··· 280 330 return 0; 281 331 } 282 332 283 - static int 333 + int 284 334 nvkm_falcon_v1_enable(struct nvkm_falcon *falcon) 285 335 { 286 336 struct nvkm_device *device = falcon->owner->device; ··· 302 352 return 0; 303 353 } 304 354 305 - static void 355 + void 306 356 nvkm_falcon_v1_disable(struct nvkm_falcon *falcon) 307 357 { 308 358 /* disable IRQs and wait for any previous code to complete */
+7
drivers/gpu/drm/nouveau/nvkm/nvfw/Kbuild
··· 1 + # SPDX-License-Identifier: MIT 2 + nvkm-y += nvkm/nvfw/fw.o 3 + nvkm-y += nvkm/nvfw/hs.o 4 + nvkm-y += nvkm/nvfw/ls.o 5 + 6 + nvkm-y += nvkm/nvfw/acr.o 7 + nvkm-y += nvkm/nvfw/flcn.o
+165
drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include <core/subdev.h> 23 + #include <nvfw/acr.h> 24 + 25 + void 26 + wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr) 27 + { 28 + nvkm_debug(subdev, "wprHeader\n"); 29 + nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id); 30 + nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset); 31 + nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner); 32 + nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap); 33 + nvkm_debug(subdev, "\tstatus : %d\n", hdr->status); 34 + } 35 + 36 + void 37 + wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr) 38 + { 39 + nvkm_debug(subdev, "wprHeader\n"); 40 + nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id); 41 + nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset); 42 + nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner); 43 + nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap); 44 + nvkm_debug(subdev, "\tbinVersion : %d\n", hdr->bin_version); 45 + nvkm_debug(subdev, "\tstatus : %d\n", hdr->status); 46 + } 47 + 48 + void 49 + lsb_header_tail_dump(struct nvkm_subdev *subdev, 50 + struct lsb_header_tail *hdr) 51 + { 52 + nvkm_debug(subdev, "lsbHeader\n"); 53 + nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off); 54 + nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size); 55 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 56 + nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size); 57 + nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off); 58 + nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off); 59 + nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size); 60 + nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off); 61 + nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size); 62 + nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off); 63 + nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size); 64 + 
nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags); 65 + } 66 + 67 + void 68 + lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr) 69 + { 70 + lsb_header_tail_dump(subdev, &hdr->tail); 71 + } 72 + 73 + void 74 + lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr) 75 + { 76 + lsb_header_tail_dump(subdev, &hdr->tail); 77 + } 78 + 79 + void 80 + flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr) 81 + { 82 + int i; 83 + 84 + nvkm_debug(subdev, "acrDesc\n"); 85 + nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id); 86 + nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset); 87 + nvkm_debug(subdev, "\tmmuMemRange : 0x%x\n", 88 + hdr->mmu_mem_range); 89 + nvkm_debug(subdev, "\tnoRegions : %d\n", 90 + hdr->regions.no_regions); 91 + 92 + for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { 93 + nvkm_debug(subdev, "\tregion[%d] :\n", i); 94 + nvkm_debug(subdev, "\t startAddr : 0x%x\n", 95 + hdr->regions.region_props[i].start_addr); 96 + nvkm_debug(subdev, "\t endAddr : 0x%x\n", 97 + hdr->regions.region_props[i].end_addr); 98 + nvkm_debug(subdev, "\t regionId : %d\n", 99 + hdr->regions.region_props[i].region_id); 100 + nvkm_debug(subdev, "\t readMask : 0x%x\n", 101 + hdr->regions.region_props[i].read_mask); 102 + nvkm_debug(subdev, "\t writeMask : 0x%x\n", 103 + hdr->regions.region_props[i].write_mask); 104 + nvkm_debug(subdev, "\t clientMask : 0x%x\n", 105 + hdr->regions.region_props[i].client_mask); 106 + } 107 + 108 + nvkm_debug(subdev, "\tucodeBlobSize: %d\n", 109 + hdr->ucode_blob_size); 110 + nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n", 111 + hdr->ucode_blob_base); 112 + nvkm_debug(subdev, "\tvprEnabled : %d\n", 113 + hdr->vpr_desc.vpr_enabled); 114 + nvkm_debug(subdev, "\tvprStart : 0x%x\n", 115 + hdr->vpr_desc.vpr_start); 116 + nvkm_debug(subdev, "\tvprEnd : 0x%x\n", 117 + hdr->vpr_desc.vpr_end); 118 + nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n", 119 + 
hdr->vpr_desc.hdcp_policies); 120 + } 121 + 122 + void 123 + flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr) 124 + { 125 + int i; 126 + 127 + nvkm_debug(subdev, "acrDesc\n"); 128 + nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id); 129 + nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset); 130 + nvkm_debug(subdev, "\tmmuMemoryRange : 0x%x\n", 131 + hdr->mmu_memory_range); 132 + nvkm_debug(subdev, "\tnoRegions : %d\n", 133 + hdr->regions.no_regions); 134 + 135 + for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { 136 + nvkm_debug(subdev, "\tregion[%d] :\n", i); 137 + nvkm_debug(subdev, "\t startAddr : 0x%x\n", 138 + hdr->regions.region_props[i].start_addr); 139 + nvkm_debug(subdev, "\t endAddr : 0x%x\n", 140 + hdr->regions.region_props[i].end_addr); 141 + nvkm_debug(subdev, "\t regionId : %d\n", 142 + hdr->regions.region_props[i].region_id); 143 + nvkm_debug(subdev, "\t readMask : 0x%x\n", 144 + hdr->regions.region_props[i].read_mask); 145 + nvkm_debug(subdev, "\t writeMask : 0x%x\n", 146 + hdr->regions.region_props[i].write_mask); 147 + nvkm_debug(subdev, "\t clientMask : 0x%x\n", 148 + hdr->regions.region_props[i].client_mask); 149 + nvkm_debug(subdev, "\t shadowMemStartAddr: 0x%x\n", 150 + hdr->regions.region_props[i].shadow_mem_start_addr); 151 + } 152 + 153 + nvkm_debug(subdev, "\tucodeBlobSize : %d\n", 154 + hdr->ucode_blob_size); 155 + nvkm_debug(subdev, "\tucodeBlobBase : 0x%llx\n", 156 + hdr->ucode_blob_base); 157 + nvkm_debug(subdev, "\tvprEnabled : %d\n", 158 + hdr->vpr_desc.vpr_enabled); 159 + nvkm_debug(subdev, "\tvprStart : 0x%x\n", 160 + hdr->vpr_desc.vpr_start); 161 + nvkm_debug(subdev, "\tvprEnd : 0x%x\n", 162 + hdr->vpr_desc.vpr_end); 163 + nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n", 164 + hdr->vpr_desc.hdcp_policies); 165 + }
+115
drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include <core/subdev.h> 23 + #include <nvfw/flcn.h> 24 + 25 + void 26 + loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr) 27 + { 28 + nvkm_debug(subdev, "loaderConfig\n"); 29 + nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx); 30 + nvkm_debug(subdev, "\tcodeDmaBase : 0x%xx\n", hdr->code_dma_base); 31 + nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total); 32 + nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load); 33 + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); 34 + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); 35 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 36 + nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base); 37 + nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc); 38 + nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv); 39 + nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1); 40 + nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1); 41 + nvkm_debug(subdev, "\tovlyDmaBase1 : 0x%x\n", hdr->overlay_dma_base1); 42 + } 43 + 44 + void 45 + loader_config_v1_dump(struct nvkm_subdev *subdev, 46 + const struct loader_config_v1 *hdr) 47 + { 48 + nvkm_debug(subdev, "loaderConfig\n"); 49 + nvkm_debug(subdev, "\treserved : 0x%08x\n", hdr->reserved); 50 + nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx); 51 + nvkm_debug(subdev, "\tcodeDmaBase : 0x%llxx\n", hdr->code_dma_base); 52 + nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total); 53 + nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load); 54 + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); 55 + nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base); 56 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 57 + nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base); 58 + nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc); 59 + 
nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv); 60 + } 61 + 62 + void 63 + flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev, 64 + const struct flcn_bl_dmem_desc *hdr) 65 + { 66 + nvkm_debug(subdev, "flcnBlDmemDesc\n"); 67 + nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n", 68 + hdr->reserved[0], hdr->reserved[1], hdr->reserved[2], 69 + hdr->reserved[3]); 70 + nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n", 71 + hdr->signature[0], hdr->signature[1], hdr->signature[2], 72 + hdr->signature[3]); 73 + nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma); 74 + nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base); 75 + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off); 76 + nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size); 77 + nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off); 78 + nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size); 79 + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); 80 + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); 81 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 82 + nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1); 83 + nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1); 84 + } 85 + 86 + void 87 + flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev, 88 + const struct flcn_bl_dmem_desc_v1 *hdr) 89 + { 90 + nvkm_debug(subdev, "flcnBlDmemDesc\n"); 91 + nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n", 92 + hdr->reserved[0], hdr->reserved[1], hdr->reserved[2], 93 + hdr->reserved[3]); 94 + nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n", 95 + hdr->signature[0], hdr->signature[1], hdr->signature[2], 96 + hdr->signature[3]); 97 + nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma); 98 + nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base); 99 + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", 
hdr->non_sec_code_off); 100 + nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size); 101 + nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off); 102 + nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size); 103 + nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point); 104 + nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base); 105 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 106 + } 107 + 108 + void 109 + flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev, 110 + const struct flcn_bl_dmem_desc_v2 *hdr) 111 + { 112 + flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr); 113 + nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc); 114 + nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv); 115 + }
+51
drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include <core/subdev.h> 23 + #include <nvfw/fw.h> 24 + 25 + const struct nvfw_bin_hdr * 26 + nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data) 27 + { 28 + const struct nvfw_bin_hdr *hdr = data; 29 + nvkm_debug(subdev, "binHdr:\n"); 30 + nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic); 31 + nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver); 32 + nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size); 33 + nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset); 34 + nvkm_debug(subdev, "\tdataOffset : 0x%x\n", hdr->data_offset); 35 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 36 + return hdr; 37 + } 38 + 39 + const struct nvfw_bl_desc * 40 + nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data) 41 + { 42 + const struct nvfw_bl_desc *hdr = data; 43 + nvkm_debug(subdev, "blDesc\n"); 44 + nvkm_debug(subdev, "\tstartTag : 0x%x\n", hdr->start_tag); 45 + nvkm_debug(subdev, "\tdmemLoadOff : 0x%x\n", hdr->dmem_load_off); 46 + nvkm_debug(subdev, "\tcodeOff : 0x%x\n", hdr->code_off); 47 + nvkm_debug(subdev, "\tcodeSize : 0x%x\n", hdr->code_size); 48 + nvkm_debug(subdev, "\tdataOff : 0x%x\n", hdr->data_off); 49 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 50 + return hdr; 51 + }
+62
drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include <core/subdev.h> 23 + #include <nvfw/hs.h> 24 + 25 + const struct nvfw_hs_header * 26 + nvfw_hs_header(struct nvkm_subdev *subdev, const void *data) 27 + { 28 + const struct nvfw_hs_header *hdr = data; 29 + nvkm_debug(subdev, "hsHeader:\n"); 30 + nvkm_debug(subdev, "\tsigDbgOffset : 0x%x\n", hdr->sig_dbg_offset); 31 + nvkm_debug(subdev, "\tsigDbgSize : 0x%x\n", hdr->sig_dbg_size); 32 + nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset); 33 + nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size); 34 + nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc); 35 + nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig); 36 + nvkm_debug(subdev, "\thdrOffset : 0x%x\n", hdr->hdr_offset); 37 + nvkm_debug(subdev, "\thdrSize : 0x%x\n", hdr->hdr_size); 38 + return hdr; 39 + } 40 + 41 + const struct nvfw_hs_load_header * 42 + nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data) 43 + { 44 + const struct nvfw_hs_load_header *hdr = data; 45 + int i; 46 + 47 + nvkm_debug(subdev, "hsLoadHeader:\n"); 48 + nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", 49 + hdr->non_sec_code_off); 50 + nvkm_debug(subdev, "\tnonSecCodeSize : 0x%x\n", 51 + hdr->non_sec_code_size); 52 + nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base); 53 + nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size); 54 + nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps); 55 + for (i = 0; i < hdr->num_apps; i++) { 56 + nvkm_debug(subdev, 57 + "\tApp[%d] : offset 0x%x size 0x%x\n", i, 58 + hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]); 59 + } 60 + 61 + return hdr; 62 + }
+108
drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include <core/subdev.h> 23 + #include <nvfw/ls.h> 24 + 25 + static void 26 + nvfw_ls_desc_head(struct nvkm_subdev *subdev, 27 + const struct nvfw_ls_desc_head *hdr) 28 + { 29 + char *date; 30 + 31 + nvkm_debug(subdev, "lsUcodeImgDesc:\n"); 32 + nvkm_debug(subdev, "\tdescriptorSize : %d\n", 33 + hdr->descriptor_size); 34 + nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size); 35 + nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n", 36 + hdr->tools_version); 37 + nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version); 38 + 39 + date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL); 40 + nvkm_debug(subdev, "\tdate : %s\n", date); 41 + kfree(date); 42 + 43 + nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n", 44 + hdr->bootloader_start_offset); 45 + nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n", 46 + hdr->bootloader_size); 47 + nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n", 48 + hdr->bootloader_imem_offset); 49 + nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n", 50 + hdr->bootloader_entry_point); 51 + 52 + nvkm_debug(subdev, "\tappStartOffset : 0x%x\n", 53 + hdr->app_start_offset); 54 + nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size); 55 + nvkm_debug(subdev, "\tappImemOffset : 0x%x\n", 56 + hdr->app_imem_offset); 57 + nvkm_debug(subdev, "\tappImemEntry : 0x%x\n", 58 + hdr->app_imem_entry); 59 + nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n", 60 + hdr->app_dmem_offset); 61 + nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n", 62 + hdr->app_resident_code_offset); 63 + nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n", 64 + hdr->app_resident_code_size); 65 + nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n", 66 + hdr->app_resident_data_offset); 67 + nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n", 68 + hdr->app_resident_data_size); 69 + } 70 + 71 + const struct nvfw_ls_desc * 72 + nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data) 73 + { 74 + const struct nvfw_ls_desc *hdr = data; 75 + int i; 76 + 77 + 
nvfw_ls_desc_head(subdev, &hdr->head); 78 + 79 + nvkm_debug(subdev, "\tnbOverlays : %d\n", hdr->nb_overlays); 80 + for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) { 81 + nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i, 82 + hdr->load_ovl[i].start, hdr->load_ovl[i].size); 83 + } 84 + nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed); 85 + 86 + return hdr; 87 + } 88 + 89 + const struct nvfw_ls_desc_v1 * 90 + nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data) 91 + { 92 + const struct nvfw_ls_desc_v1 *hdr = data; 93 + int i; 94 + 95 + nvfw_ls_desc_head(subdev, &hdr->head); 96 + 97 + nvkm_debug(subdev, "\tnbImemOverlays : %d\n", 98 + hdr->nb_imem_overlays); 99 + nvkm_debug(subdev, "\tnbDmemOverlays : %d\n", 100 + hdr->nb_imem_overlays); 101 + for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) { 102 + nvkm_debug(subdev, "\tloadOvl[%2d] : 0x%x %d\n", i, 103 + hdr->load_ovl[i].start, hdr->load_ovl[i].size); 104 + } 105 + nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed); 106 + 107 + return hdr; 108 + }
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
··· 1 1 # SPDX-License-Identifier: MIT 2 + include $(src)/nvkm/subdev/acr/Kbuild 2 3 include $(src)/nvkm/subdev/bar/Kbuild 3 4 include $(src)/nvkm/subdev/bios/Kbuild 4 5 include $(src)/nvkm/subdev/bus/Kbuild ··· 20 19 include $(src)/nvkm/subdev/mxm/Kbuild 21 20 include $(src)/nvkm/subdev/pci/Kbuild 22 21 include $(src)/nvkm/subdev/pmu/Kbuild 23 - include $(src)/nvkm/subdev/secboot/Kbuild 24 22 include $(src)/nvkm/subdev/therm/Kbuild 25 23 include $(src)/nvkm/subdev/timer/Kbuild 26 24 include $(src)/nvkm/subdev/top/Kbuild
+10
drivers/gpu/drm/nouveau/nvkm/subdev/acr/Kbuild
··· 1 + # SPDX-License-Identifier: MIT 2 + nvkm-y += nvkm/subdev/acr/base.o 3 + nvkm-y += nvkm/subdev/acr/hsfw.o 4 + nvkm-y += nvkm/subdev/acr/lsfw.o 5 + nvkm-y += nvkm/subdev/acr/gm200.o 6 + nvkm-y += nvkm/subdev/acr/gm20b.o 7 + nvkm-y += nvkm/subdev/acr/gp102.o 8 + nvkm-y += nvkm/subdev/acr/gp108.o 9 + nvkm-y += nvkm/subdev/acr/gp10b.o 10 + nvkm-y += nvkm/subdev/acr/tu102.o
+411
drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + #include <core/firmware.h> 25 + #include <core/memory.h> 26 + #include <subdev/mmu.h> 27 + 28 + static struct nvkm_acr_hsf * 29 + nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name) 30 + { 31 + struct nvkm_acr_hsf *hsf; 32 + list_for_each_entry(hsf, &acr->hsf, head) { 33 + if (!strcmp(hsf->name, name)) 34 + return hsf; 35 + } 36 + return NULL; 37 + } 38 + 39 + int 40 + nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name) 41 + { 42 + struct nvkm_subdev *subdev = &acr->subdev; 43 + struct nvkm_acr_hsf *hsf; 44 + int ret; 45 + 46 + hsf = nvkm_acr_hsf_find(acr, name); 47 + if (!hsf) 48 + return -EINVAL; 49 + 50 + nvkm_debug(subdev, "executing %s binary\n", hsf->name); 51 + ret = nvkm_falcon_get(hsf->falcon, subdev); 52 + if (ret) 53 + return ret; 54 + 55 + ret = hsf->func->boot(acr, hsf); 56 + nvkm_falcon_put(hsf->falcon, subdev); 57 + if (ret) { 58 + nvkm_error(subdev, "%s binary failed\n", hsf->name); 59 + return ret; 60 + } 61 + 62 + nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name); 63 + return 0; 64 + } 65 + 66 + static void 67 + nvkm_acr_unload(struct nvkm_acr *acr) 68 + { 69 + if (acr->done) { 70 + nvkm_acr_hsf_boot(acr, "unload"); 71 + acr->done = false; 72 + } 73 + } 74 + 75 + static int 76 + nvkm_acr_load(struct nvkm_acr *acr) 77 + { 78 + struct nvkm_subdev *subdev = &acr->subdev; 79 + struct nvkm_acr_lsf *lsf; 80 + u64 start, limit; 81 + int ret; 82 + 83 + if (list_empty(&acr->lsf)) { 84 + nvkm_debug(subdev, "No LSF(s) present.\n"); 85 + return 0; 86 + } 87 + 88 + ret = acr->func->init(acr); 89 + if (ret) 90 + return ret; 91 + 92 + acr->func->wpr_check(acr, &start, &limit); 93 + 94 + if (start != acr->wpr_start || limit != acr->wpr_end) { 95 + nvkm_error(subdev, "WPR not configured as expected: " 96 + "%016llx-%016llx vs %016llx-%016llx\n", 97 + acr->wpr_start, acr->wpr_end, start, limit); 98 + return -EIO; 99 + } 100 + 101 + acr->done = true; 102 + 103 + list_for_each_entry(lsf, 
&acr->lsf, head) { 104 + if (lsf->func->boot) { 105 + ret = lsf->func->boot(lsf->falcon); 106 + if (ret) 107 + break; 108 + } 109 + } 110 + 111 + return ret; 112 + } 113 + 114 + static int 115 + nvkm_acr_reload(struct nvkm_acr *acr) 116 + { 117 + nvkm_acr_unload(acr); 118 + return nvkm_acr_load(acr); 119 + } 120 + 121 + static struct nvkm_acr_lsf * 122 + nvkm_acr_falcon(struct nvkm_device *device) 123 + { 124 + struct nvkm_acr *acr = device->acr; 125 + struct nvkm_acr_lsf *lsf; 126 + 127 + if (acr) { 128 + list_for_each_entry(lsf, &acr->lsf, head) { 129 + if (lsf->func->bootstrap_falcon) 130 + return lsf; 131 + } 132 + } 133 + 134 + return NULL; 135 + } 136 + 137 + int 138 + nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask) 139 + { 140 + struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device); 141 + struct nvkm_acr *acr = device->acr; 142 + unsigned long id; 143 + 144 + if (!acrflcn) { 145 + int ret = nvkm_acr_reload(acr); 146 + if (ret) 147 + return ret; 148 + 149 + return acr->done ? 
0 : -EINVAL; 150 + } 151 + 152 + if (acrflcn->func->bootstrap_multiple_falcons) { 153 + return acrflcn->func-> 154 + bootstrap_multiple_falcons(acrflcn->falcon, mask); 155 + } 156 + 157 + for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) { 158 + int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id); 159 + if (ret) 160 + return ret; 161 + } 162 + 163 + return 0; 164 + } 165 + 166 + bool 167 + nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id) 168 + { 169 + struct nvkm_acr *acr = device->acr; 170 + struct nvkm_acr_lsf *lsf; 171 + 172 + if (acr) { 173 + list_for_each_entry(lsf, &acr->lsf, head) { 174 + if (lsf->id == id) 175 + return true; 176 + } 177 + } 178 + 179 + return false; 180 + } 181 + 182 + static int 183 + nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend) 184 + { 185 + nvkm_acr_unload(nvkm_acr(subdev)); 186 + return 0; 187 + } 188 + 189 + static int 190 + nvkm_acr_init(struct nvkm_subdev *subdev) 191 + { 192 + if (!nvkm_acr_falcon(subdev->device)) 193 + return 0; 194 + 195 + return nvkm_acr_load(nvkm_acr(subdev)); 196 + } 197 + 198 + static void 199 + nvkm_acr_cleanup(struct nvkm_acr *acr) 200 + { 201 + nvkm_acr_lsfw_del_all(acr); 202 + nvkm_acr_hsfw_del_all(acr); 203 + nvkm_firmware_put(acr->wpr_fw); 204 + acr->wpr_fw = NULL; 205 + } 206 + 207 + static int 208 + nvkm_acr_oneinit(struct nvkm_subdev *subdev) 209 + { 210 + struct nvkm_device *device = subdev->device; 211 + struct nvkm_acr *acr = nvkm_acr(subdev); 212 + struct nvkm_acr_hsfw *hsfw; 213 + struct nvkm_acr_lsfw *lsfw, *lsft; 214 + struct nvkm_acr_lsf *lsf; 215 + u32 wpr_size = 0; 216 + int ret, i; 217 + 218 + if (list_empty(&acr->hsfw)) { 219 + nvkm_debug(subdev, "No HSFW(s)\n"); 220 + nvkm_acr_cleanup(acr); 221 + return 0; 222 + } 223 + 224 + /* Determine layout/size of WPR image up-front, as we need to know 225 + * it to allocate memory before we begin constructing it. 
226 + */ 227 + list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) { 228 + /* Cull unknown falcons that are present in WPR image. */ 229 + if (acr->wpr_fw) { 230 + if (!lsfw->func) { 231 + nvkm_acr_lsfw_del(lsfw); 232 + continue; 233 + } 234 + 235 + wpr_size = acr->wpr_fw->size; 236 + } 237 + 238 + /* Ensure we've fetched falcon configuration. */ 239 + ret = nvkm_falcon_get(lsfw->falcon, subdev); 240 + if (ret) 241 + return ret; 242 + 243 + nvkm_falcon_put(lsfw->falcon, subdev); 244 + 245 + if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL))) 246 + return -ENOMEM; 247 + lsf->func = lsfw->func; 248 + lsf->falcon = lsfw->falcon; 249 + lsf->id = lsfw->id; 250 + list_add_tail(&lsf->head, &acr->lsf); 251 + } 252 + 253 + if (!acr->wpr_fw || acr->wpr_comp) 254 + wpr_size = acr->func->wpr_layout(acr); 255 + 256 + /* Allocate/Locate WPR + fill ucode blob pointer. 257 + * 258 + * dGPU: allocate WPR + shadow blob 259 + * Tegra: locate WPR with regs, ensure size is sufficient, 260 + * allocate ucode blob. 261 + */ 262 + ret = acr->func->wpr_alloc(acr, wpr_size); 263 + if (ret) 264 + return ret; 265 + 266 + nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n", 267 + acr->wpr_start, acr->wpr_end, acr->shadow_start); 268 + 269 + /* Write WPR to ucode blob. 
*/ 270 + nvkm_kmap(acr->wpr); 271 + if (acr->wpr_fw && !acr->wpr_comp) 272 + nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size); 273 + 274 + if (!acr->wpr_fw || acr->wpr_comp) 275 + acr->func->wpr_build(acr, nvkm_acr_falcon(device)); 276 + acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev); 277 + 278 + if (acr->wpr_fw && acr->wpr_comp) { 279 + nvkm_kmap(acr->wpr); 280 + for (i = 0; i < acr->wpr_fw->size; i += 4) { 281 + u32 us = nvkm_ro32(acr->wpr, i); 282 + u32 fw = ((u32 *)acr->wpr_fw->data)[i/4]; 283 + if (fw != us) { 284 + nvkm_warn(subdev, "%08x: %08x %08x\n", 285 + i, us, fw); 286 + } 287 + } 288 + return -EINVAL; 289 + } 290 + nvkm_done(acr->wpr); 291 + 292 + /* Allocate instance block for ACR-related stuff. */ 293 + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, 294 + &acr->inst); 295 + if (ret) 296 + return ret; 297 + 298 + ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm); 299 + if (ret) 300 + return ret; 301 + 302 + acr->vmm->debug = acr->subdev.debug; 303 + 304 + ret = nvkm_vmm_join(acr->vmm, acr->inst); 305 + if (ret) 306 + return ret; 307 + 308 + /* Load HS firmware blobs into ACR VMM. */ 309 + list_for_each_entry(hsfw, &acr->hsfw, head) { 310 + nvkm_debug(subdev, "loading %s fw\n", hsfw->name); 311 + ret = hsfw->func->load(acr, hsfw); 312 + if (ret) 313 + return ret; 314 + } 315 + 316 + /* Kill temporary data. 
*/ 317 + nvkm_acr_cleanup(acr); 318 + return 0; 319 + } 320 + 321 + static void * 322 + nvkm_acr_dtor(struct nvkm_subdev *subdev) 323 + { 324 + struct nvkm_acr *acr = nvkm_acr(subdev); 325 + struct nvkm_acr_hsf *hsf, *hst; 326 + struct nvkm_acr_lsf *lsf, *lst; 327 + 328 + list_for_each_entry_safe(hsf, hst, &acr->hsf, head) { 329 + nvkm_vmm_put(acr->vmm, &hsf->vma); 330 + nvkm_memory_unref(&hsf->ucode); 331 + kfree(hsf->imem); 332 + list_del(&hsf->head); 333 + kfree(hsf); 334 + } 335 + 336 + nvkm_vmm_part(acr->vmm, acr->inst); 337 + nvkm_vmm_unref(&acr->vmm); 338 + nvkm_memory_unref(&acr->inst); 339 + 340 + nvkm_memory_unref(&acr->wpr); 341 + 342 + list_for_each_entry_safe(lsf, lst, &acr->lsf, head) { 343 + list_del(&lsf->head); 344 + kfree(lsf); 345 + } 346 + 347 + nvkm_acr_cleanup(acr); 348 + return acr; 349 + } 350 + 351 + static const struct nvkm_subdev_func 352 + nvkm_acr = { 353 + .dtor = nvkm_acr_dtor, 354 + .oneinit = nvkm_acr_oneinit, 355 + .init = nvkm_acr_init, 356 + .fini = nvkm_acr_fini, 357 + }; 358 + 359 + static int 360 + nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver) 361 + { 362 + struct nvkm_subdev *subdev = &acr->subdev; 363 + struct nvkm_device *device = subdev->device; 364 + int ret; 365 + 366 + ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw); 367 + if (ret < 0) 368 + return ret; 369 + 370 + /* Pre-add LSFs in the order they appear in the FW WPR image so that 371 + * we're able to do a binary comparison with our own generator. 
372 + */ 373 + ret = acr->func->wpr_parse(acr); 374 + if (ret) 375 + return ret; 376 + 377 + acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false); 378 + acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0); 379 + return 0; 380 + } 381 + 382 + int 383 + nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device, 384 + int index, struct nvkm_acr **pacr) 385 + { 386 + struct nvkm_acr *acr; 387 + long wprfw; 388 + 389 + if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL))) 390 + return -ENOMEM; 391 + nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev); 392 + INIT_LIST_HEAD(&acr->hsfw); 393 + INIT_LIST_HEAD(&acr->lsfw); 394 + INIT_LIST_HEAD(&acr->hsf); 395 + INIT_LIST_HEAD(&acr->lsf); 396 + 397 + fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr); 398 + if (IS_ERR(fwif)) 399 + return PTR_ERR(fwif); 400 + 401 + acr->func = fwif->func; 402 + 403 + wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1); 404 + if (wprfw >= 0) { 405 + int ret = nvkm_acr_ctor_wpr(acr, wprfw); 406 + if (ret) 407 + return ret; 408 + } 409 + 410 + return 0; 411 + }
+470
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + #include <core/falcon.h> 25 + #include <core/firmware.h> 26 + #include <core/memory.h> 27 + #include <subdev/mc.h> 28 + #include <subdev/mmu.h> 29 + #include <subdev/pmu.h> 30 + #include <subdev/timer.h> 31 + 32 + #include <nvfw/acr.h> 33 + #include <nvfw/flcn.h> 34 + 35 + int 36 + gm200_acr_init(struct nvkm_acr *acr) 37 + { 38 + return nvkm_acr_hsf_boot(acr, "load"); 39 + } 40 + 41 + void 42 + gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit) 43 + { 44 + struct nvkm_device *device = acr->subdev.device; 45 + 46 + nvkm_wr32(device, 0x100cd4, 2); 47 + *start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8; 48 + nvkm_wr32(device, 0x100cd4, 3); 49 + *limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8; 50 + *limit = *limit + 0x20000; 51 + } 52 + 53 + void 54 + gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust) 55 + { 56 + struct nvkm_subdev *subdev = &acr->subdev; 57 + struct wpr_header hdr; 58 + struct lsb_header lsb; 59 + struct nvkm_acr_lsf *lsfw; 60 + u32 offset = 0; 61 + 62 + do { 63 + nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr)); 64 + wpr_header_dump(subdev, &hdr); 65 + 66 + list_for_each_entry(lsfw, &acr->lsfw, head) { 67 + if (lsfw->id != hdr.falcon_id) 68 + continue; 69 + 70 + nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb)); 71 + lsb_header_dump(subdev, &lsb); 72 + 73 + lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust); 74 + break; 75 + } 76 + offset += sizeof(hdr); 77 + } while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID); 78 + } 79 + 80 + void 81 + gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw, 82 + struct lsb_header_tail *hdr) 83 + { 84 + hdr->ucode_off = lsfw->offset.img; 85 + hdr->ucode_size = lsfw->ucode_size; 86 + hdr->data_size = lsfw->data_size; 87 + hdr->bl_code_size = lsfw->bootloader_size; 88 + hdr->bl_imem_off = lsfw->bootloader_imem_offset; 89 + hdr->bl_data_off = lsfw->offset.bld; 90 + hdr->bl_data_size = lsfw->bl_data_size; 91 + 
hdr->app_code_off = lsfw->app_start_offset + 92 + lsfw->app_resident_code_offset; 93 + hdr->app_code_size = lsfw->app_resident_code_size; 94 + hdr->app_data_off = lsfw->app_start_offset + 95 + lsfw->app_resident_data_offset; 96 + hdr->app_data_size = lsfw->app_resident_data_size; 97 + hdr->flags = lsfw->func->flags; 98 + } 99 + 100 + static int 101 + gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw) 102 + { 103 + struct lsb_header hdr; 104 + 105 + if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature))) 106 + return -EINVAL; 107 + 108 + memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size); 109 + gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail); 110 + 111 + nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr)); 112 + return 0; 113 + } 114 + 115 + int 116 + gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) 117 + { 118 + struct nvkm_acr_lsfw *lsfw; 119 + u32 offset = 0; 120 + int ret; 121 + 122 + /* Fill per-LSF structures. */ 123 + list_for_each_entry(lsfw, &acr->lsfw, head) { 124 + struct wpr_header hdr = { 125 + .falcon_id = lsfw->id, 126 + .lsb_offset = lsfw->offset.lsb, 127 + .bootstrap_owner = NVKM_ACR_LSF_PMU, 128 + .lazy_bootstrap = rtos && lsfw->id != rtos->id, 129 + .status = WPR_HEADER_V0_STATUS_COPY, 130 + }; 131 + 132 + /* Write WPR header. */ 133 + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); 134 + offset += sizeof(hdr); 135 + 136 + /* Write LSB header. */ 137 + ret = gm200_acr_wpr_build_lsb(acr, lsfw); 138 + if (ret) 139 + return ret; 140 + 141 + /* Write ucode image. */ 142 + nvkm_wobj(acr->wpr, lsfw->offset.img, 143 + lsfw->img.data, 144 + lsfw->img.size); 145 + 146 + /* Write bootloader data. */ 147 + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); 148 + } 149 + 150 + /* Finalise WPR. 
*/ 151 + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID); 152 + return 0; 153 + } 154 + 155 + static int 156 + gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) 157 + { 158 + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, 159 + ALIGN(wpr_size, 0x40000), 0x40000, true, 160 + &acr->wpr); 161 + if (ret) 162 + return ret; 163 + 164 + acr->wpr_start = nvkm_memory_addr(acr->wpr); 165 + acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr); 166 + return 0; 167 + } 168 + 169 + u32 170 + gm200_acr_wpr_layout(struct nvkm_acr *acr) 171 + { 172 + struct nvkm_acr_lsfw *lsfw; 173 + u32 wpr = 0; 174 + 175 + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header); 176 + 177 + list_for_each_entry(lsfw, &acr->lsfw, head) { 178 + wpr = ALIGN(wpr, 256); 179 + lsfw->offset.lsb = wpr; 180 + wpr += sizeof(struct lsb_header); 181 + 182 + wpr = ALIGN(wpr, 4096); 183 + lsfw->offset.img = wpr; 184 + wpr += lsfw->img.size; 185 + 186 + wpr = ALIGN(wpr, 256); 187 + lsfw->offset.bld = wpr; 188 + lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256); 189 + wpr += lsfw->bl_data_size; 190 + } 191 + 192 + return wpr; 193 + } 194 + 195 + int 196 + gm200_acr_wpr_parse(struct nvkm_acr *acr) 197 + { 198 + const struct wpr_header *hdr = (void *)acr->wpr_fw->data; 199 + 200 + while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { 201 + wpr_header_dump(&acr->subdev, hdr); 202 + if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) 203 + return -ENOMEM; 204 + } 205 + 206 + return 0; 207 + } 208 + 209 + void 210 + gm200_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) 211 + { 212 + struct flcn_bl_dmem_desc_v1 hsdesc = { 213 + .ctx_dma = FALCON_DMAIDX_VIRT, 214 + .code_dma_base = hsf->vma->addr, 215 + .non_sec_code_off = hsf->non_sec_addr, 216 + .non_sec_code_size = hsf->non_sec_size, 217 + .sec_code_off = hsf->sec_addr, 218 + .sec_code_size = hsf->sec_size, 219 + .code_entry_point = 0, 220 + .data_dma_base = hsf->vma->addr + hsf->data_addr, 221 
+ .data_size = hsf->data_size, 222 + }; 223 + 224 + flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hsdesc); 225 + 226 + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); 227 + } 228 + 229 + int 230 + gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf, 231 + u32 intr_clear, u32 mbox0_ok) 232 + { 233 + struct nvkm_subdev *subdev = &acr->subdev; 234 + struct nvkm_device *device = subdev->device; 235 + struct nvkm_falcon *falcon = hsf->falcon; 236 + u32 mbox0, mbox1; 237 + int ret; 238 + 239 + /* Reset falcon. */ 240 + nvkm_falcon_reset(falcon); 241 + nvkm_falcon_bind_context(falcon, acr->inst); 242 + 243 + /* Load bootloader into IMEM. */ 244 + nvkm_falcon_load_imem(falcon, hsf->imem, 245 + falcon->code.limit - hsf->imem_size, 246 + hsf->imem_size, 247 + hsf->imem_tag, 248 + 0, false); 249 + 250 + /* Load bootloader data into DMEM. */ 251 + hsf->func->bld(acr, hsf); 252 + 253 + /* Boot the falcon. */ 254 + nvkm_mc_intr_mask(device, falcon->owner->index, false); 255 + 256 + nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); 257 + nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8); 258 + nvkm_falcon_start(falcon); 259 + ret = nvkm_falcon_wait_for_halt(falcon, 100); 260 + if (ret) 261 + return ret; 262 + 263 + /* Check for successful completion. 
*/ 264 + mbox0 = nvkm_falcon_rd32(falcon, 0x040); 265 + mbox1 = nvkm_falcon_rd32(falcon, 0x044); 266 + nvkm_debug(subdev, "mailbox %08x %08x\n", mbox0, mbox1); 267 + if (mbox0 && mbox0 != mbox0_ok) 268 + return -EIO; 269 + 270 + nvkm_falcon_clear_interrupt(falcon, intr_clear); 271 + nvkm_mc_intr_mask(device, falcon->owner->index, true); 272 + return ret; 273 + } 274 + 275 + int 276 + gm200_acr_hsfw_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw, 277 + struct nvkm_falcon *falcon) 278 + { 279 + struct nvkm_subdev *subdev = &acr->subdev; 280 + struct nvkm_acr_hsf *hsf; 281 + int ret; 282 + 283 + /* Patch the appropriate signature (production/debug) into the FW 284 + * image, as determined by the mode the falcon is in. 285 + */ 286 + ret = nvkm_falcon_get(falcon, subdev); 287 + if (ret) 288 + return ret; 289 + 290 + if (hsfw->sig.patch_loc) { 291 + if (!falcon->debug) { 292 + nvkm_debug(subdev, "patching production signature\n"); 293 + memcpy(hsfw->image + hsfw->sig.patch_loc, 294 + hsfw->sig.prod.data, 295 + hsfw->sig.prod.size); 296 + } else { 297 + nvkm_debug(subdev, "patching debug signature\n"); 298 + memcpy(hsfw->image + hsfw->sig.patch_loc, 299 + hsfw->sig.dbg.data, 300 + hsfw->sig.dbg.size); 301 + } 302 + } 303 + 304 + nvkm_falcon_put(falcon, subdev); 305 + 306 + if (!(hsf = kzalloc(sizeof(*hsf), GFP_KERNEL))) 307 + return -ENOMEM; 308 + hsf->func = hsfw->func; 309 + hsf->name = hsfw->name; 310 + list_add_tail(&hsf->head, &acr->hsf); 311 + 312 + hsf->imem_size = hsfw->imem_size; 313 + hsf->imem_tag = hsfw->imem_tag; 314 + hsf->imem = kmemdup(hsfw->imem, hsfw->imem_size, GFP_KERNEL); 315 + if (!hsf->imem) 316 + return -ENOMEM; 317 + 318 + hsf->non_sec_addr = hsfw->non_sec_addr; 319 + hsf->non_sec_size = hsfw->non_sec_size; 320 + hsf->sec_addr = hsfw->sec_addr; 321 + hsf->sec_size = hsfw->sec_size; 322 + hsf->data_addr = hsfw->data_addr; 323 + hsf->data_size = hsfw->data_size; 324 + 325 + /* Make the FW image accessible to the HS bootloader. 
*/ 326 + ret = nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, 327 + hsfw->image_size, 0x1000, false, &hsf->ucode); 328 + if (ret) 329 + return ret; 330 + 331 + nvkm_kmap(hsf->ucode); 332 + nvkm_wobj(hsf->ucode, 0, hsfw->image, hsfw->image_size); 333 + nvkm_done(hsf->ucode); 334 + 335 + ret = nvkm_vmm_get(acr->vmm, 12, nvkm_memory_size(hsf->ucode), 336 + &hsf->vma); 337 + if (ret) 338 + return ret; 339 + 340 + ret = nvkm_memory_map(hsf->ucode, 0, acr->vmm, hsf->vma, NULL, 0); 341 + if (ret) 342 + return ret; 343 + 344 + hsf->falcon = falcon; 345 + return 0; 346 + } 347 + 348 + int 349 + gm200_acr_unload_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) 350 + { 351 + return gm200_acr_hsfw_boot(acr, hsf, 0, 0x1d); 352 + } 353 + 354 + int 355 + gm200_acr_unload_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) 356 + { 357 + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); 358 + } 359 + 360 + const struct nvkm_acr_hsf_func 361 + gm200_acr_unload_0 = { 362 + .load = gm200_acr_unload_load, 363 + .boot = gm200_acr_unload_boot, 364 + .bld = gm200_acr_hsfw_bld, 365 + }; 366 + 367 + MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); 368 + MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); 369 + MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); 370 + MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); 371 + 372 + static const struct nvkm_acr_hsf_fwif 373 + gm200_acr_unload_fwif[] = { 374 + { 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 }, 375 + {} 376 + }; 377 + 378 + int 379 + gm200_acr_load_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) 380 + { 381 + return gm200_acr_hsfw_boot(acr, hsf, 0x10, 0); 382 + } 383 + 384 + static int 385 + gm200_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) 386 + { 387 + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; 388 + 389 + desc->wpr_region_id = 1; 390 + desc->regions.no_regions = 2; 391 + desc->regions.region_props[0].start_addr = acr->wpr_start >> 
8; 392 + desc->regions.region_props[0].end_addr = acr->wpr_end >> 8; 393 + desc->regions.region_props[0].region_id = 1; 394 + desc->regions.region_props[0].read_mask = 0xf; 395 + desc->regions.region_props[0].write_mask = 0xc; 396 + desc->regions.region_props[0].client_mask = 0x2; 397 + flcn_acr_desc_dump(&acr->subdev, desc); 398 + 399 + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); 400 + } 401 + 402 + static const struct nvkm_acr_hsf_func 403 + gm200_acr_load_0 = { 404 + .load = gm200_acr_load_load, 405 + .boot = gm200_acr_load_boot, 406 + .bld = gm200_acr_hsfw_bld, 407 + }; 408 + 409 + MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); 410 + MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); 411 + 412 + MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); 413 + MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); 414 + 415 + MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); 416 + MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); 417 + 418 + MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); 419 + MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); 420 + 421 + static const struct nvkm_acr_hsf_fwif 422 + gm200_acr_load_fwif[] = { 423 + { 0, nvkm_acr_hsfw_load, &gm200_acr_load_0 }, 424 + {} 425 + }; 426 + 427 + static const struct nvkm_acr_func 428 + gm200_acr = { 429 + .load = gm200_acr_load_fwif, 430 + .unload = gm200_acr_unload_fwif, 431 + .wpr_parse = gm200_acr_wpr_parse, 432 + .wpr_layout = gm200_acr_wpr_layout, 433 + .wpr_alloc = gm200_acr_wpr_alloc, 434 + .wpr_build = gm200_acr_wpr_build, 435 + .wpr_patch = gm200_acr_wpr_patch, 436 + .wpr_check = gm200_acr_wpr_check, 437 + .init = gm200_acr_init, 438 + }; 439 + 440 + static int 441 + gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) 442 + { 443 + struct nvkm_subdev *subdev = &acr->subdev; 444 + const struct nvkm_acr_hsf_fwif *hsfwif; 445 + 446 + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", 447 + acr, "acr/bl", "acr/ucode_load", "load"); 448 + if 
(IS_ERR(hsfwif)) 449 + return PTR_ERR(hsfwif); 450 + 451 + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", 452 + acr, "acr/bl", "acr/ucode_unload", 453 + "unload"); 454 + if (IS_ERR(hsfwif)) 455 + return PTR_ERR(hsfwif); 456 + 457 + return 0; 458 + } 459 + 460 + static const struct nvkm_acr_fwif 461 + gm200_acr_fwif[] = { 462 + { 0, gm200_acr_load, &gm200_acr }, 463 + {} 464 + }; 465 + 466 + int 467 + gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) 468 + { 469 + return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr); 470 + }
+134
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + #include <core/firmware.h> 25 + #include <core/memory.h> 26 + #include <subdev/mmu.h> 27 + #include <subdev/pmu.h> 28 + 29 + #include <nvfw/acr.h> 30 + #include <nvfw/flcn.h> 31 + 32 + int 33 + gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) 34 + { 35 + struct nvkm_subdev *subdev = &acr->subdev; 36 + 37 + acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end); 38 + 39 + if ((acr->wpr_end - acr->wpr_start) < wpr_size) { 40 + nvkm_error(subdev, "WPR image too big for WPR!\n"); 41 + return -ENOSPC; 42 + } 43 + 44 + return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST, 45 + wpr_size, 0, true, &acr->wpr); 46 + } 47 + 48 + static void 49 + gm20b_acr_load_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) 50 + { 51 + struct flcn_bl_dmem_desc hsdesc = { 52 + .ctx_dma = FALCON_DMAIDX_VIRT, 53 + .code_dma_base = hsf->vma->addr >> 8, 54 + .non_sec_code_off = hsf->non_sec_addr, 55 + .non_sec_code_size = hsf->non_sec_size, 56 + .sec_code_off = hsf->sec_addr, 57 + .sec_code_size = hsf->sec_size, 58 + .code_entry_point = 0, 59 + .data_dma_base = (hsf->vma->addr + hsf->data_addr) >> 8, 60 + .data_size = hsf->data_size, 61 + }; 62 + 63 + flcn_bl_dmem_desc_dump(&acr->subdev, &hsdesc); 64 + 65 + nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0); 66 + } 67 + 68 + static int 69 + gm20b_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) 70 + { 71 + struct flcn_acr_desc *desc = (void *)&hsfw->image[hsfw->data_addr]; 72 + 73 + desc->ucode_blob_base = nvkm_memory_addr(acr->wpr); 74 + desc->ucode_blob_size = nvkm_memory_size(acr->wpr); 75 + flcn_acr_desc_dump(&acr->subdev, desc); 76 + 77 + return gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->pmu->falcon); 78 + } 79 + 80 + const struct nvkm_acr_hsf_func 81 + gm20b_acr_load_0 = { 82 + .load = gm20b_acr_load_load, 83 + .boot = gm200_acr_load_boot, 84 + .bld = gm20b_acr_load_bld, 85 + }; 86 + 87 + #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 88 + 
MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); 89 + MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); 90 + #endif 91 + 92 + static const struct nvkm_acr_hsf_fwif 93 + gm20b_acr_load_fwif[] = { 94 + { 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 }, 95 + {} 96 + }; 97 + 98 + static const struct nvkm_acr_func 99 + gm20b_acr = { 100 + .load = gm20b_acr_load_fwif, 101 + .wpr_parse = gm200_acr_wpr_parse, 102 + .wpr_layout = gm200_acr_wpr_layout, 103 + .wpr_alloc = gm20b_acr_wpr_alloc, 104 + .wpr_build = gm200_acr_wpr_build, 105 + .wpr_patch = gm200_acr_wpr_patch, 106 + .wpr_check = gm200_acr_wpr_check, 107 + .init = gm200_acr_init, 108 + }; 109 + 110 + int 111 + gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif) 112 + { 113 + struct nvkm_subdev *subdev = &acr->subdev; 114 + const struct nvkm_acr_hsf_fwif *hsfwif; 115 + 116 + hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad", 117 + acr, "acr/bl", "acr/ucode_load", "load"); 118 + if (IS_ERR(hsfwif)) 119 + return PTR_ERR(hsfwif); 120 + 121 + return 0; 122 + } 123 + 124 + static const struct nvkm_acr_fwif 125 + gm20b_acr_fwif[] = { 126 + { 0, gm20b_acr_load, &gm20b_acr }, 127 + {} 128 + }; 129 + 130 + int 131 + gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) 132 + { 133 + return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr); 134 + }
+281
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + #include <core/firmware.h> 25 + #include <core/memory.h> 26 + #include <subdev/mmu.h> 27 + #include <engine/sec2.h> 28 + 29 + #include <nvfw/acr.h> 30 + #include <nvfw/flcn.h> 31 + 32 + void 33 + gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust) 34 + { 35 + struct wpr_header_v1 hdr; 36 + struct lsb_header_v1 lsb; 37 + struct nvkm_acr_lsfw *lsfw; 38 + u32 offset = 0; 39 + 40 + do { 41 + nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr)); 42 + wpr_header_v1_dump(&acr->subdev, &hdr); 43 + 44 + list_for_each_entry(lsfw, &acr->lsfw, head) { 45 + if (lsfw->id != hdr.falcon_id) 46 + continue; 47 + 48 + nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb)); 49 + lsb_header_v1_dump(&acr->subdev, &lsb); 50 + 51 + lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust); 52 + break; 53 + } 54 + 55 + offset += sizeof(hdr); 56 + } while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID); 57 + } 58 + 59 + int 60 + gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw) 61 + { 62 + struct lsb_header_v1 hdr; 63 + 64 + if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature))) 65 + return -EINVAL; 66 + 67 + memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size); 68 + gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail); 69 + 70 + nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr)); 71 + return 0; 72 + } 73 + 74 + int 75 + gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) 76 + { 77 + struct nvkm_acr_lsfw *lsfw; 78 + u32 offset = 0; 79 + int ret; 80 + 81 + /* Fill per-LSF structures. */ 82 + list_for_each_entry(lsfw, &acr->lsfw, head) { 83 + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; 84 + struct wpr_header_v1 hdr = { 85 + .falcon_id = lsfw->id, 86 + .lsb_offset = lsfw->offset.lsb, 87 + .bootstrap_owner = NVKM_ACR_LSF_SEC2, 88 + .lazy_bootstrap = rtos && lsfw->id != rtos->id, 89 + .bin_version = sig->version, 90 + .status = WPR_HEADER_V1_STATUS_COPY, 91 + }; 92 + 93 + /* Write WPR header. 
*/ 94 + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); 95 + offset += sizeof(hdr); 96 + 97 + /* Write LSB header. */ 98 + ret = gp102_acr_wpr_build_lsb(acr, lsfw); 99 + if (ret) 100 + return ret; 101 + 102 + /* Write ucode image. */ 103 + nvkm_wobj(acr->wpr, lsfw->offset.img, 104 + lsfw->img.data, 105 + lsfw->img.size); 106 + 107 + /* Write bootloader data. */ 108 + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); 109 + } 110 + 111 + /* Finalise WPR. */ 112 + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); 113 + return 0; 114 + } 115 + 116 + int 117 + gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size) 118 + { 119 + int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST, 120 + ALIGN(wpr_size, 0x40000) << 1, 0x40000, true, 121 + &acr->wpr); 122 + if (ret) 123 + return ret; 124 + 125 + acr->shadow_start = nvkm_memory_addr(acr->wpr); 126 + acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1); 127 + acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1); 128 + return 0; 129 + } 130 + 131 + u32 132 + gp102_acr_wpr_layout(struct nvkm_acr *acr) 133 + { 134 + struct nvkm_acr_lsfw *lsfw; 135 + u32 wpr = 0; 136 + 137 + wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1); 138 + wpr = ALIGN(wpr, 256); 139 + 140 + wpr += 0x100; /* Shared sub-WPR headers. 
 */

	list_for_each_entry(lsfw, &acr->lsfw, head) {
		/* LSB header for this falcon, 256-byte aligned. */
		wpr = ALIGN(wpr, 256);
		lsfw->offset.lsb = wpr;
		wpr += sizeof(struct lsb_header_v1);

		/* Firmware image, 4096-byte aligned. */
		wpr = ALIGN(wpr, 4096);
		lsfw->offset.img = wpr;
		wpr += lsfw->img.size;

		/* Bootloader data, 256-byte aligned. */
		wpr = ALIGN(wpr, 256);
		lsfw->offset.bld = wpr;
		lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
		wpr += lsfw->bl_data_size;
	}

	return wpr;
}

/* Walk the v1 WPR headers at the start of the WPR firmware image,
 * registering an LS firmware entry (with no func bound yet) for each,
 * until the INVALID falcon-id terminator is reached.
 */
int
gp102_acr_wpr_parse(struct nvkm_acr *acr)
{
	const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;

	while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
		wpr_header_v1_dump(&acr->subdev, hdr);
		if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
			return -ENOMEM;
	}

	return 0;
}

MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");

MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");

MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");

MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");

static const struct nvkm_acr_hsf_fwif
gp102_acr_unload_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gm200_acr_unload_0 },
	{}
};

/* Patch the "load" HS firmware's v1 ACR descriptor with the WPR region
 * layout before handing it to the common GM200 loader, targeting the
 * SEC2 falcon.
 */
int
gp102_acr_load_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw)
{
	struct flcn_acr_desc_v1 *desc = (void *)&hsfw->image[hsfw->data_addr];

	desc->wpr_region_id = 1;
	desc->regions.no_regions = 2;
	/* Addresses are programmed in units of 256 bytes (>> 8). */
	desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
	desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
	desc->regions.region_props[0].region_id = 1;
	/* NOTE(review): mask/client values presumably mirror HW-defined
	 * WPR access permissions - confirm against ACR firmware docs.
	 */
	desc->regions.region_props[0].read_mask = 0xf;
	desc->regions.region_props[0].write_mask = 0xc;
	desc->regions.region_props[0].client_mask = 0x2;
	desc->regions.region_props[0].shadow_mem_start_addr =
		acr->shadow_start >> 8;
	flcn_acr_desc_v1_dump(&acr->subdev, desc);

	return gm200_acr_hsfw_load(acr, hsfw,
				   &acr->subdev.device->sec2->falcon);
}

static const struct nvkm_acr_hsf_func
gp102_acr_load_0 = {
	.load = gp102_acr_load_load,
	.boot = gm200_acr_load_boot,
	.bld = gm200_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");

static const struct nvkm_acr_hsf_fwif
gp102_acr_load_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gp102_acr_load_0 },
	{}
};

static const struct nvkm_acr_func
gp102_acr = {
	.load = gp102_acr_load_fwif,
	.unload = gp102_acr_unload_fwif,
	.wpr_parse = gp102_acr_wpr_parse,
	.wpr_layout = gp102_acr_wpr_layout,
	.wpr_alloc = gp102_acr_wpr_alloc,
	.wpr_build = gp102_acr_wpr_build,
	.wpr_patch = gp102_acr_wpr_patch,
	.wpr_check = gm200_acr_wpr_check,
	.init = gm200_acr_init,
};

/* Fetch both the "load" and "unload" HS firmware pairs (bootloader +
 * ucode) for this ACR implementation.  Shared with GP108/GV100.
 */
int
gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	const struct nvkm_acr_hsf_fwif *hsfwif;

	hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
				    acr, "acr/bl", "acr/ucode_load", "load");
	if (IS_ERR(hsfwif))
		return PTR_ERR(hsfwif);

	hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
				    acr, "acr/unload_bl", "acr/ucode_unload",
				    "unload");
	if (IS_ERR(hsfwif))
		return PTR_ERR(hsfwif);

	return 0;
}

static const struct nvkm_acr_fwif
gp102_acr_fwif[] = {
	{ 0, gp102_acr_load, &gp102_acr },
	{}
};

/* Constructor entry point for the GP102-family ACR subdev. */
int
gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
{
	return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr);
}
+111
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mmu.h>

#include <nvfw/flcn.h>

/* Write a v2-format bootloader DMEM descriptor for an HS firmware into
 * the target falcon's DMEM, dumping it for debugging first.
 */
void
gp108_acr_hsfw_bld(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf)
{
	struct flcn_bl_dmem_desc_v2 hsdesc = {
		.ctx_dma = FALCON_DMAIDX_VIRT,
		.code_dma_base = hsf->vma->addr,
		.non_sec_code_off = hsf->non_sec_addr,
		.non_sec_code_size = hsf->non_sec_size,
		.sec_code_off = hsf->sec_addr,
		.sec_code_size = hsf->sec_size,
		.code_entry_point = 0,
		/* Data follows code within the same VMA mapping. */
		.data_dma_base = hsf->vma->addr + hsf->data_addr,
		.data_size = hsf->data_size,
		.argc = 0,
		.argv = 0,
	};

	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hsdesc);

	nvkm_falcon_load_dmem(hsf->falcon, &hsdesc, 0, sizeof(hsdesc), 0);
}

const struct nvkm_acr_hsf_func
gp108_acr_unload_0 = {
	.load = gm200_acr_unload_load,
	.boot = gm200_acr_unload_boot,
	.bld = gp108_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");

MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");

static const struct nvkm_acr_hsf_fwif
gp108_acr_unload_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
	{}
};

/* Same as GP102's loader, but with the v2 bootloader descriptor. */
static const struct nvkm_acr_hsf_func
gp108_acr_load_0 = {
	.load = gp102_acr_load_load,
	.boot = gm200_acr_load_boot,
	.bld = gp108_acr_hsfw_bld,
};

MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");

MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");

static const struct nvkm_acr_hsf_fwif
gp108_acr_load_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gp108_acr_load_0 },
	{}
};

/* WPR handling is inherited from GP102 wholesale. */
static const struct nvkm_acr_func
gp108_acr = {
	.load = gp108_acr_load_fwif,
	.unload = gp108_acr_unload_fwif,
	.wpr_parse = gp102_acr_wpr_parse,
	.wpr_layout = gp102_acr_wpr_layout,
	.wpr_alloc = gp102_acr_wpr_alloc,
	.wpr_build = gp102_acr_wpr_build,
	.wpr_patch = gp102_acr_wpr_patch,
	.wpr_check = gm200_acr_wpr_check,
	.init = gm200_acr_init,
};

static const struct nvkm_acr_fwif
gp108_acr_fwif[] = {
	{ 0, gp102_acr_load, &gp108_acr },
	{}
};

/* Constructor entry point for the GP108/GV100 ACR subdev. */
int
gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
{
	return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr);
}
+57
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c
/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

/* GP10B is a Tegra (X2) part; only advertise its firmware when the
 * matching SoC support is built in.
 */
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
#endif

static const struct nvkm_acr_hsf_fwif
gp10b_acr_load_fwif[] = {
	{ 0, nvkm_acr_hsfw_load, &gm20b_acr_load_0 },
	{}
};

/* Mostly GM200/GM20B behaviour; note there is no "unload" HS firmware
 * table for this part.
 */
static const struct nvkm_acr_func
gp10b_acr = {
	.load = gp10b_acr_load_fwif,
	.wpr_parse = gm200_acr_wpr_parse,
	.wpr_layout = gm200_acr_wpr_layout,
	.wpr_alloc = gm20b_acr_wpr_alloc,
	.wpr_build = gm200_acr_wpr_build,
	.wpr_patch = gm200_acr_wpr_patch,
	.wpr_check = gm200_acr_wpr_check,
	.init = gm200_acr_init,
};

static const struct nvkm_acr_fwif
gp10b_acr_fwif[] = {
	{ 0, gm20b_acr_load, &gp10b_acr },
	{}
};

/* Constructor entry point for the GP10B ACR subdev. */
int
gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr)
{
	return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr);
}
+180
drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + #include <core/firmware.h> 25 + 26 + #include <nvfw/fw.h> 27 + #include <nvfw/hs.h> 28 + 29 + static void 30 + nvkm_acr_hsfw_del(struct nvkm_acr_hsfw *hsfw) 31 + { 32 + list_del(&hsfw->head); 33 + kfree(hsfw->imem); 34 + kfree(hsfw->image); 35 + kfree(hsfw->sig.prod.data); 36 + kfree(hsfw->sig.dbg.data); 37 + kfree(hsfw); 38 + } 39 + 40 + void 41 + nvkm_acr_hsfw_del_all(struct nvkm_acr *acr) 42 + { 43 + struct nvkm_acr_hsfw *hsfw, *hsft; 44 + list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) { 45 + nvkm_acr_hsfw_del(hsfw); 46 + } 47 + } 48 + 49 + static int 50 + nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver, 51 + struct nvkm_acr_hsfw *hsfw) 52 + { 53 + struct nvkm_subdev *subdev = &acr->subdev; 54 + const struct firmware *fw; 55 + const struct nvfw_bin_hdr *hdr; 56 + const struct nvfw_hs_header *fwhdr; 57 + const struct nvfw_hs_load_header *lhdr; 58 + u32 loc, sig; 59 + int ret; 60 + 61 + ret = nvkm_firmware_get(subdev, name, ver, &fw); 62 + if (ret < 0) 63 + return ret; 64 + 65 + hdr = nvfw_bin_hdr(subdev, fw->data); 66 + fwhdr = nvfw_hs_header(subdev, fw->data + hdr->header_offset); 67 + 68 + /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's 69 + * standard format, and don't have the indirection seen in the 0x10de 70 + * case. 
71 + */ 72 + switch (hdr->bin_magic) { 73 + case 0x000010de: 74 + loc = *(u32 *)(fw->data + fwhdr->patch_loc); 75 + sig = *(u32 *)(fw->data + fwhdr->patch_sig); 76 + break; 77 + case 0x3b1d14f0: 78 + loc = fwhdr->patch_loc; 79 + sig = fwhdr->patch_sig; 80 + break; 81 + default: 82 + ret = -EINVAL; 83 + goto done; 84 + } 85 + 86 + lhdr = nvfw_hs_load_header(subdev, fw->data + fwhdr->hdr_offset); 87 + 88 + if (!(hsfw->image = kmalloc(hdr->data_size, GFP_KERNEL))) { 89 + ret = -ENOMEM; 90 + goto done; 91 + } 92 + 93 + memcpy(hsfw->image, fw->data + hdr->data_offset, hdr->data_size); 94 + hsfw->image_size = hdr->data_size; 95 + hsfw->non_sec_addr = lhdr->non_sec_code_off; 96 + hsfw->non_sec_size = lhdr->non_sec_code_size; 97 + hsfw->sec_addr = lhdr->apps[0]; 98 + hsfw->sec_size = lhdr->apps[lhdr->num_apps]; 99 + hsfw->data_addr = lhdr->data_dma_base; 100 + hsfw->data_size = lhdr->data_size; 101 + 102 + hsfw->sig.prod.size = fwhdr->sig_prod_size; 103 + hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL); 104 + if (!hsfw->sig.prod.data) { 105 + ret = -ENOMEM; 106 + goto done; 107 + } 108 + 109 + memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig, 110 + hsfw->sig.prod.size); 111 + 112 + hsfw->sig.dbg.size = fwhdr->sig_dbg_size; 113 + hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL); 114 + if (!hsfw->sig.dbg.data) { 115 + ret = -ENOMEM; 116 + goto done; 117 + } 118 + 119 + memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig, 120 + hsfw->sig.dbg.size); 121 + 122 + hsfw->sig.patch_loc = loc; 123 + done: 124 + nvkm_firmware_put(fw); 125 + return ret; 126 + } 127 + 128 + static int 129 + nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver, 130 + struct nvkm_acr_hsfw *hsfw) 131 + { 132 + struct nvkm_subdev *subdev = &acr->subdev; 133 + const struct nvfw_bin_hdr *hdr; 134 + const struct nvfw_bl_desc *desc; 135 + const struct firmware *fw; 136 + u8 *data; 137 + int ret; 138 + 139 + ret = 
nvkm_firmware_get(subdev, name, ver, &fw); 140 + if (ret) 141 + return ret; 142 + 143 + hdr = nvfw_bin_hdr(subdev, fw->data); 144 + desc = nvfw_bl_desc(subdev, fw->data + hdr->header_offset); 145 + data = (void *)fw->data + hdr->data_offset; 146 + 147 + hsfw->imem_size = desc->code_size; 148 + hsfw->imem_tag = desc->start_tag; 149 + hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL); 150 + memcpy(hsfw->imem, data + desc->code_off, desc->code_size); 151 + 152 + nvkm_firmware_put(fw); 153 + return 0; 154 + } 155 + 156 + int 157 + nvkm_acr_hsfw_load(struct nvkm_acr *acr, const char *bl, const char *fw, 158 + const char *name, int version, 159 + const struct nvkm_acr_hsf_fwif *fwif) 160 + { 161 + struct nvkm_acr_hsfw *hsfw; 162 + int ret; 163 + 164 + if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL))) 165 + return -ENOMEM; 166 + 167 + hsfw->func = fwif->func; 168 + hsfw->name = name; 169 + list_add_tail(&hsfw->head, &acr->hsfw); 170 + 171 + ret = nvkm_acr_hsfw_load_bl(acr, bl, version, hsfw); 172 + if (ret) 173 + goto done; 174 + 175 + ret = nvkm_acr_hsfw_load_image(acr, fw, version, hsfw); 176 + done: 177 + if (ret) 178 + nvkm_acr_hsfw_del(hsfw); 179 + return ret; 180 + }
+249
drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c
/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include <core/falcon.h>
#include <core/firmware.h>
#include <nvfw/fw.h>
#include <nvfw/ls.h>

/* Free an LS firmware entry and remove it from its ACR's list. */
void
nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw)
{
	nvkm_blob_dtor(&lsfw->img);
	nvkm_firmware_put(lsfw->sig);
	list_del(&lsfw->head);
	kfree(lsfw);
}

void
nvkm_acr_lsfw_del_all(struct nvkm_acr *acr)
{
	struct nvkm_acr_lsfw *lsfw, *lsft;
	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		nvkm_acr_lsfw_del(lsfw);
	}
}

/* Look up an existing LS firmware entry by falcon ID, or NULL. */
static struct nvkm_acr_lsfw *
nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id)
{
	struct nvkm_acr_lsfw *lsfw;
	list_for_each_entry(lsfw, &acr->lsfw, head) {
		if (lsfw->id == id)
			return lsfw;
	}
	return NULL;
}

/* Register (or complete) an LS firmware entry for 'id'.  An entry may
 * first be created with func == NULL (e.g. by WPR parsing) and later
 * bound to a func/falcon; redefining an already-bound entry is -EEXIST.
 */
struct nvkm_acr_lsfw *
nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
		  struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id)
{
	struct nvkm_acr_lsfw *lsfw = nvkm_acr_lsfw_get(acr, id);

	if (lsfw && lsfw->func) {
		nvkm_error(&acr->subdev, "LSFW %d redefined\n", id);
		return ERR_PTR(-EEXIST);
	}

	if (!lsfw) {
		if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL)))
			return ERR_PTR(-ENOMEM);

		lsfw->id = id;
		list_add_tail(&lsfw->head, &acr->lsfw);
	}

	lsfw->func = func;
	lsfw->falcon = falcon;
	return lsfw;
}

/* Common helper: register an LS firmware entry and load its "sig",
 * "image" and "desc" firmware files.  On success the caller owns the
 * returned *pdesc and must nvkm_firmware_put() it.
 */
static struct nvkm_acr_lsfw *
nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev,
				   struct nvkm_falcon *falcon,
				   enum nvkm_acr_lsf_id id,
				   const char *path, int ver,
				   const struct nvkm_acr_lsf_func *func,
				   const struct firmware **pdesc)
{
	struct nvkm_acr *acr = subdev->device->acr;
	struct nvkm_acr_lsfw *lsfw;
	int ret;

	if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
		return lsfw;

	ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
	if (ret)
		goto done;

	ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img);
	if (ret)
		goto done;

	ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc);
done:
	if (ret) {
		nvkm_acr_lsfw_del(lsfw);
		return ERR_PTR(ret);
	}

	return lsfw;
}

/* Translate the common LS descriptor header into the lsfw layout
 * fields used when building the WPR image.
 */
static void
nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc,
			struct nvkm_acr_lsfw *lsfw)
{
	lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256);
	lsfw->bootloader_imem_offset = desc->bootloader_imem_offset;

	lsfw->app_size = ALIGN(desc->app_size, 256);
	lsfw->app_start_offset = desc->app_start_offset;
	lsfw->app_imem_entry = desc->app_imem_entry;
	lsfw->app_resident_code_offset = desc->app_resident_code_offset;
	lsfw->app_resident_code_size = desc->app_resident_code_size;
	lsfw->app_resident_data_offset = desc->app_resident_data_offset;
	lsfw->app_resident_data_size = desc->app_resident_data_size;

	/* ucode = bootloader + everything up to the resident data;
	 * data = the remainder of the image.
	 */
	lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
			   lsfw->bootloader_size;
	lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
			  lsfw->ucode_size;
}

/* Load an LS firmware described by a v0 "desc" file. */
int
nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev,
				  struct nvkm_falcon *falcon,
				  enum nvkm_acr_lsf_id id,
				  const char *path, int ver,
				  const struct nvkm_acr_lsf_func *func)
{
	const struct firmware *fw;
	struct nvkm_acr_lsfw *lsfw;

	lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
						  func, &fw);
	if (IS_ERR(lsfw))
		return PTR_ERR(lsfw);

	nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw);
	nvkm_firmware_put(fw);
	return 0;
}

/* As above, but for a v1 "desc" file. */
int
nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev,
				     struct nvkm_falcon *falcon,
				     enum nvkm_acr_lsf_id id,
				     const char *path, int ver,
				     const struct nvkm_acr_lsf_func *func)
{
	const struct firmware *fw;
	struct nvkm_acr_lsfw *lsfw;

	lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
						  func, &fw);
	if (IS_ERR(lsfw))
		return PTR_ERR(lsfw);

	nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw);
	nvkm_firmware_put(fw);
	return 0;
}

/* Load an LS firmware supplied as separate "bl", "inst", "data" and
 * "sig" files, assembling a combined image (bootloader, then code,
 * then data, each 256-byte aligned) in lsfw->img.
 */
int
nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev,
				    struct nvkm_falcon *falcon,
				    enum nvkm_acr_lsf_id id,
				    const char *path, int ver,
				    const struct nvkm_acr_lsf_func *func)
{
	struct nvkm_acr *acr = subdev->device->acr;
	struct nvkm_acr_lsfw *lsfw;
	const struct firmware *bl = NULL, *inst = NULL, *data = NULL;
	const struct nvfw_bin_hdr *hdr;
	const struct nvfw_bl_desc *desc;
	u32 *bldata;
	int ret;

	if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
		return PTR_ERR(lsfw);

	ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl);
	if (ret)
		goto done;

	hdr = nvfw_bin_hdr(subdev, bl->data);
	desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
	bldata = (void *)(bl->data + hdr->data_offset);

	ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst);
	if (ret)
		goto done;

	ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data);
	if (ret)
		goto done;

	ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
	if (ret)
		goto done;

	lsfw->bootloader_size = ALIGN(desc->code_size, 256);
	/* IMEM tags address 256-byte blocks, hence the << 8. */
	lsfw->bootloader_imem_offset = desc->start_tag << 8;

	lsfw->app_start_offset = lsfw->bootloader_size;
	lsfw->app_imem_entry = 0;
	lsfw->app_resident_code_offset = 0;
	lsfw->app_resident_code_size = ALIGN(inst->size, 256);
	lsfw->app_resident_data_offset = lsfw->app_resident_code_size;
	lsfw->app_resident_data_size = ALIGN(data->size, 256);
	lsfw->app_size = lsfw->app_resident_code_size +
			 lsfw->app_resident_data_size;

	lsfw->img.size = lsfw->bootloader_size + lsfw->app_size;
	if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto done;
	}

	/* NOTE(review): copies the 256-byte-aligned bootloader size from
	 * the "bl" data section - relies on that source buffer being at
	 * least that large; confirm against the firmware packaging.
	 */
	memcpy(lsfw->img.data, bldata, lsfw->bootloader_size);
	memcpy(lsfw->img.data + lsfw->app_start_offset +
	       lsfw->app_resident_code_offset, inst->data, inst->size);
	memcpy(lsfw->img.data + lsfw->app_start_offset +
	       lsfw->app_resident_data_offset, data->data, data->size);

	lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
			   lsfw->bootloader_size;
	lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
			  lsfw->ucode_size;

done:
	if (ret)
		nvkm_acr_lsfw_del(lsfw);
	nvkm_firmware_put(data);
	nvkm_firmware_put(inst);
	nvkm_firmware_put(bl);
	return ret;
}
+151
drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h
#ifndef __NVKM_ACR_PRIV_H__
#define __NVKM_ACR_PRIV_H__
#include <subdev/acr.h>
struct lsb_header_tail;

/* Firmware-version interface: selects the nvkm_acr_func implementation
 * to use for a given firmware revision.
 */
struct nvkm_acr_fwif {
	int version;
	int (*load)(struct nvkm_acr *, int version,
		    const struct nvkm_acr_fwif *);
	const struct nvkm_acr_func *func;
};

int gm20b_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);
int gp102_acr_load(struct nvkm_acr *, int, const struct nvkm_acr_fwif *);

struct nvkm_acr_lsf;
/* Per-implementation hooks: HS firmware tables plus the WPR (write-
 * protected region) construction/inspection callbacks.
 */
struct nvkm_acr_func {
	const struct nvkm_acr_hsf_fwif *load;
	const struct nvkm_acr_hsf_fwif *ahesasc;
	const struct nvkm_acr_hsf_fwif *asb;
	const struct nvkm_acr_hsf_fwif *unload;
	int (*wpr_parse)(struct nvkm_acr *);
	u32 (*wpr_layout)(struct nvkm_acr *);
	int (*wpr_alloc)(struct nvkm_acr *, u32 wpr_size);
	int (*wpr_build)(struct nvkm_acr *, struct nvkm_acr_lsf *rtos);
	void (*wpr_patch)(struct nvkm_acr *, s64 adjust);
	void (*wpr_check)(struct nvkm_acr *, u64 *start, u64 *limit);
	int (*init)(struct nvkm_acr *);
	void (*fini)(struct nvkm_acr *);
};

int gm200_acr_wpr_parse(struct nvkm_acr *);
u32 gm200_acr_wpr_layout(struct nvkm_acr *);
int gm200_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
void gm200_acr_wpr_patch(struct nvkm_acr *, s64);
void gm200_acr_wpr_check(struct nvkm_acr *, u64 *, u64 *);
void gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *,
				  struct lsb_header_tail *);
int gm200_acr_init(struct nvkm_acr *);

int gm20b_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);

int gp102_acr_wpr_parse(struct nvkm_acr *);
u32 gp102_acr_wpr_layout(struct nvkm_acr *);
int gp102_acr_wpr_alloc(struct nvkm_acr *, u32 wpr_size);
int gp102_acr_wpr_build(struct nvkm_acr *, struct nvkm_acr_lsf *);
int gp102_acr_wpr_build_lsb(struct nvkm_acr *, struct nvkm_acr_lsfw *);
void gp102_acr_wpr_patch(struct nvkm_acr *, s64);

/* A high-secure firmware as loaded from disk: bootloader code for IMEM,
 * the ucode image itself, and its prod/dbg signatures.
 */
struct nvkm_acr_hsfw {
	const struct nvkm_acr_hsf_func *func;
	const char *name;
	struct list_head head;

	u32 imem_size;
	u32 imem_tag;
	u32 *imem;

	u8 *image;
	u32 image_size;
	u32 non_sec_addr;
	u32 non_sec_size;
	u32 sec_addr;
	u32 sec_size;
	u32 data_addr;
	u32 data_size;

	struct {
		struct {
			void *data;
			u32 size;
		} prod, dbg;
		u32 patch_loc;
	} sig;
};

/* Firmware-version interface for HS firmware loaders. */
struct nvkm_acr_hsf_fwif {
	int version;
	int (*load)(struct nvkm_acr *, const char *bl, const char *fw,
		    const char *name, int version,
		    const struct nvkm_acr_hsf_fwif *);
	const struct nvkm_acr_hsf_func *func;
};

int nvkm_acr_hsfw_load(struct nvkm_acr *, const char *, const char *,
		       const char *, int, const struct nvkm_acr_hsf_fwif *);
void nvkm_acr_hsfw_del_all(struct nvkm_acr *);

/* A high-secure firmware as uploaded to a falcon: backing memory,
 * VMA mapping and target falcon.
 */
struct nvkm_acr_hsf {
	const struct nvkm_acr_hsf_func *func;
	const char *name;
	struct list_head head;

	u32 imem_size;
	u32 imem_tag;
	u32 *imem;

	u32 non_sec_addr;
	u32 non_sec_size;
	u32 sec_addr;
	u32 sec_size;
	u32 data_addr;
	u32 data_size;

	struct nvkm_memory *ucode;
	struct nvkm_vma *vma;
	struct nvkm_falcon *falcon;
};

/* Hooks for loading, bootloader-descriptor setup, and booting of an
 * HS firmware on its target falcon.
 */
struct nvkm_acr_hsf_func {
	int (*load)(struct nvkm_acr *, struct nvkm_acr_hsfw *);
	int (*boot)(struct nvkm_acr *, struct nvkm_acr_hsf *);
	void (*bld)(struct nvkm_acr *, struct nvkm_acr_hsf *);
};

int gm200_acr_hsfw_load(struct nvkm_acr *, struct nvkm_acr_hsfw *,
			struct nvkm_falcon *);
int gm200_acr_hsfw_boot(struct nvkm_acr *, struct nvkm_acr_hsf *,
			u32 clear_intr, u32 mbox0_ok);

int gm200_acr_load_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);

extern const struct nvkm_acr_hsf_func gm200_acr_unload_0;
int gm200_acr_unload_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);
int gm200_acr_unload_boot(struct nvkm_acr *, struct nvkm_acr_hsf *);
void gm200_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);

extern const struct nvkm_acr_hsf_func gm20b_acr_load_0;

int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *);

extern const struct nvkm_acr_hsf_func gp108_acr_unload_0;
void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *);

int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int,
		  struct nvkm_acr **);
int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name);

/* A light-secure falcon managed by the ACR at runtime. */
struct nvkm_acr_lsf {
	const struct nvkm_acr_lsf_func *func;
	struct nvkm_falcon *falcon;
	enum nvkm_acr_lsf_id id;
	struct list_head head;
};

struct nvkm_acr_lsfw *nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *,
					struct nvkm_acr *, struct nvkm_falcon *,
					enum nvkm_acr_lsf_id);
void nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *);
void nvkm_acr_lsfw_del_all(struct nvkm_acr *);
#endif
+215
drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + */ 22 + #include "priv.h" 23 + 24 + #include <core/firmware.h> 25 + #include <core/memory.h> 26 + #include <subdev/gsp.h> 27 + #include <subdev/pmu.h> 28 + #include <engine/sec2.h> 29 + 30 + #include <nvfw/acr.h> 31 + 32 + static int 33 + tu102_acr_init(struct nvkm_acr *acr) 34 + { 35 + int ret = nvkm_acr_hsf_boot(acr, "AHESASC"); 36 + if (ret) 37 + return ret; 38 + 39 + return nvkm_acr_hsf_boot(acr, "ASB"); 40 + } 41 + 42 + static int 43 + tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos) 44 + { 45 + struct nvkm_acr_lsfw *lsfw; 46 + u32 offset = 0; 47 + int ret; 48 + 49 + /*XXX: shared sub-WPR headers, fill terminator for now. */ 50 + nvkm_wo32(acr->wpr, 0x200, 0xffffffff); 51 + 52 + /* Fill per-LSF structures. 
*/ 53 + list_for_each_entry(lsfw, &acr->lsfw, head) { 54 + struct lsf_signature_v1 *sig = (void *)lsfw->sig->data; 55 + struct wpr_header_v1 hdr = { 56 + .falcon_id = lsfw->id, 57 + .lsb_offset = lsfw->offset.lsb, 58 + .bootstrap_owner = NVKM_ACR_LSF_GSPLITE, 59 + .lazy_bootstrap = 1, 60 + .bin_version = sig->version, 61 + .status = WPR_HEADER_V1_STATUS_COPY, 62 + }; 63 + 64 + /* Write WPR header. */ 65 + nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr)); 66 + offset += sizeof(hdr); 67 + 68 + /* Write LSB header. */ 69 + ret = gp102_acr_wpr_build_lsb(acr, lsfw); 70 + if (ret) 71 + return ret; 72 + 73 + /* Write ucode image. */ 74 + nvkm_wobj(acr->wpr, lsfw->offset.img, 75 + lsfw->img.data, 76 + lsfw->img.size); 77 + 78 + /* Write bootloader data. */ 79 + lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw); 80 + } 81 + 82 + /* Finalise WPR. */ 83 + nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID); 84 + return 0; 85 + } 86 + 87 + static int 88 + tu102_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf) 89 + { 90 + return gm200_acr_hsfw_boot(acr, hsf, 0, 0); 91 + } 92 + 93 + static int 94 + tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw, 95 + const char *name, int version, 96 + const struct nvkm_acr_hsf_fwif *fwif) 97 + { 98 + return 0; 99 + } 100 + 101 + MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin"); 102 + MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin"); 103 + 104 + MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin"); 105 + MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin"); 106 + 107 + MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin"); 108 + MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin"); 109 + 110 + static const struct nvkm_acr_hsf_fwif 111 + tu102_acr_unload_fwif[] = { 112 + { 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 }, 113 + { -1, tu102_acr_hsfw_nofw }, 114 + {} 115 + }; 116 + 117 + static int 118 + tu102_acr_asb_load(struct nvkm_acr *acr, struct nvkm_acr_hsfw *hsfw) 119 + { 120 + return 
gm200_acr_hsfw_load(acr, hsfw, &acr->subdev.device->gsp->falcon); 121 + } 122 + 123 + static const struct nvkm_acr_hsf_func 124 + tu102_acr_asb_0 = { 125 + .load = tu102_acr_asb_load, 126 + .boot = tu102_acr_hsfw_boot, 127 + .bld = gp108_acr_hsfw_bld, 128 + }; 129 + 130 + MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin"); 131 + MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin"); 132 + MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin"); 133 + 134 + static const struct nvkm_acr_hsf_fwif 135 + tu102_acr_asb_fwif[] = { 136 + { 0, nvkm_acr_hsfw_load, &tu102_acr_asb_0 }, 137 + { -1, tu102_acr_hsfw_nofw }, 138 + {} 139 + }; 140 + 141 + static const struct nvkm_acr_hsf_func 142 + tu102_acr_ahesasc_0 = { 143 + .load = gp102_acr_load_load, 144 + .boot = tu102_acr_hsfw_boot, 145 + .bld = gp108_acr_hsfw_bld, 146 + }; 147 + 148 + MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin"); 149 + MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin"); 150 + 151 + MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin"); 152 + MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin"); 153 + 154 + MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin"); 155 + MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin"); 156 + 157 + static const struct nvkm_acr_hsf_fwif 158 + tu102_acr_ahesasc_fwif[] = { 159 + { 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 }, 160 + { -1, tu102_acr_hsfw_nofw }, 161 + {} 162 + }; 163 + 164 + static const struct nvkm_acr_func 165 + tu102_acr = { 166 + .ahesasc = tu102_acr_ahesasc_fwif, 167 + .asb = tu102_acr_asb_fwif, 168 + .unload = tu102_acr_unload_fwif, 169 + .wpr_parse = gp102_acr_wpr_parse, 170 + .wpr_layout = gp102_acr_wpr_layout, 171 + .wpr_alloc = gp102_acr_wpr_alloc, 172 + .wpr_patch = gp102_acr_wpr_patch, 173 + .wpr_build = tu102_acr_wpr_build, 174 + .wpr_check = gm200_acr_wpr_check, 175 + .init = tu102_acr_init, 176 + }; 177 + 178 + static int 179 + tu102_acr_load(struct nvkm_acr *acr, int version, 180 + const struct nvkm_acr_fwif *fwif) 181 + { 182 + struct nvkm_subdev *subdev = &acr->subdev; 183 
+ const struct nvkm_acr_hsf_fwif *hsfwif; 184 + 185 + hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC", 186 + acr, "acr/bl", "acr/ucode_ahesasc", 187 + "AHESASC"); 188 + if (IS_ERR(hsfwif)) 189 + return PTR_ERR(hsfwif); 190 + 191 + hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB", 192 + acr, "acr/bl", "acr/ucode_asb", "ASB"); 193 + if (IS_ERR(hsfwif)) 194 + return PTR_ERR(hsfwif); 195 + 196 + hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload", 197 + acr, "acr/unload_bl", "acr/ucode_unload", 198 + "unload"); 199 + if (IS_ERR(hsfwif)) 200 + return PTR_ERR(hsfwif); 201 + 202 + return 0; 203 + } 204 + 205 + static const struct nvkm_acr_fwif 206 + tu102_acr_fwif[] = { 207 + { 0, tu102_acr_load, &tu102_acr }, 208 + {} 209 + }; 210 + 211 + int 212 + tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) 213 + { 214 + return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr); 215 + }
+1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
··· 2 2 nvkm-y += nvkm/subdev/fault/base.o 3 3 nvkm-y += nvkm/subdev/fault/user.o 4 4 nvkm-y += nvkm/subdev/fault/gp100.o 5 + nvkm-y += nvkm/subdev/fault/gp10b.o 5 6 nvkm-y += nvkm/subdev/fault/gv100.o 6 7 nvkm-y += nvkm/subdev/fault/tu102.o
+2 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
··· 108 108 return ret; 109 109 110 110 /* Pin fault buffer in BAR2. */ 111 - buffer->addr = nvkm_memory_bar2(buffer->mem); 111 + buffer->addr = fault->func->buffer.pin(buffer); 112 112 if (buffer->addr == ~0ULL) 113 113 return -EFAULT; 114 114 ··· 146 146 struct nvkm_fault *fault = nvkm_fault(subdev); 147 147 int i; 148 148 149 + nvkm_notify_fini(&fault->nrpfb); 149 150 nvkm_event_fini(&fault->event); 150 151 151 152 for (i = 0; i < fault->buffer_nr; i++) {
+12 -5
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
··· 21 21 */ 22 22 #include "priv.h" 23 23 24 + #include <core/memory.h> 24 25 #include <subdev/mc.h> 25 26 26 27 #include <nvif/class.h> 27 28 28 - static void 29 + void 29 30 gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable) 30 31 { 31 32 struct nvkm_device *device = buffer->fault->subdev.device; 32 33 nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable); 33 34 } 34 35 35 - static void 36 + void 36 37 gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer) 37 38 { 38 39 struct nvkm_device *device = buffer->fault->subdev.device; 39 40 nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000); 40 41 } 41 42 42 - static void 43 + void 43 44 gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer) 44 45 { 45 46 struct nvkm_device *device = buffer->fault->subdev.device; ··· 49 48 nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001); 50 49 } 51 50 52 - static void 51 + u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer) 52 + { 53 + return nvkm_memory_bar2(buffer->mem); 54 + } 55 + 56 + void 53 57 gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer) 54 58 { 55 59 buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78); ··· 62 56 buffer->put = 0x002a80; 63 57 } 64 58 65 - static void 59 + void 66 60 gp100_fault_intr(struct nvkm_fault *fault) 67 61 { 68 62 nvkm_event_send(&fault->event, 1, 0, NULL, 0); ··· 74 68 .buffer.nr = 1, 75 69 .buffer.entry_size = 32, 76 70 .buffer.info = gp100_fault_buffer_info, 71 + .buffer.pin = gp100_fault_buffer_pin, 77 72 .buffer.init = gp100_fault_buffer_init, 78 73 .buffer.fini = gp100_fault_buffer_fini, 79 74 .buffer.intr = gp100_fault_buffer_intr,
+53
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
··· 1 + /* 2 + * Copyright (c) 2019 NVIDIA Corporation. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + 23 + #include "priv.h" 24 + 25 + #include <core/memory.h> 26 + 27 + #include <nvif/class.h> 28 + 29 + u64 30 + gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer) 31 + { 32 + return nvkm_memory_addr(buffer->mem); 33 + } 34 + 35 + static const struct nvkm_fault_func 36 + gp10b_fault = { 37 + .intr = gp100_fault_intr, 38 + .buffer.nr = 1, 39 + .buffer.entry_size = 32, 40 + .buffer.info = gp100_fault_buffer_info, 41 + .buffer.pin = gp10b_fault_buffer_pin, 42 + .buffer.init = gp100_fault_buffer_init, 43 + .buffer.fini = gp100_fault_buffer_fini, 44 + .buffer.intr = gp100_fault_buffer_intr, 45 + .user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 }, 46 + }; 47 + 48 + int 49 + gp10b_fault_new(struct nvkm_device *device, int index, 50 + struct nvkm_fault **pfault) 51 + { 52 + return nvkm_fault_new_(&gp10b_fault, device, index, pfault); 53 + }
+1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
··· 214 214 .buffer.nr = 2, 215 215 .buffer.entry_size = 32, 216 216 .buffer.info = gv100_fault_buffer_info, 217 + .buffer.pin = gp100_fault_buffer_pin, 217 218 .buffer.init = gv100_fault_buffer_init, 218 219 .buffer.fini = gv100_fault_buffer_fini, 219 220 .buffer.intr = gv100_fault_buffer_intr,
+10
drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
··· 30 30 int nr; 31 31 u32 entry_size; 32 32 void (*info)(struct nvkm_fault_buffer *); 33 + u64 (*pin)(struct nvkm_fault_buffer *); 33 34 void (*init)(struct nvkm_fault_buffer *); 34 35 void (*fini)(struct nvkm_fault_buffer *); 35 36 void (*intr)(struct nvkm_fault_buffer *, bool enable); ··· 40 39 int rp; 41 40 } user; 42 41 }; 42 + 43 + void gp100_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable); 44 + void gp100_fault_buffer_fini(struct nvkm_fault_buffer *); 45 + void gp100_fault_buffer_init(struct nvkm_fault_buffer *); 46 + u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *); 47 + void gp100_fault_buffer_info(struct nvkm_fault_buffer *); 48 + void gp100_fault_intr(struct nvkm_fault *); 49 + 50 + u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *); 43 51 44 52 int gv100_fault_oneinit(struct nvkm_fault *); 45 53
+1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
··· 154 154 .buffer.nr = 2, 155 155 .buffer.entry_size = 32, 156 156 .buffer.info = tu102_fault_buffer_info, 157 + .buffer.pin = gp100_fault_buffer_pin, 157 158 .buffer.init = tu102_fault_buffer_init, 158 159 .buffer.fini = tu102_fault_buffer_fini, 159 160 .buffer.intr = tu102_fault_buffer_intr,
+19
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
··· 154 154 155 155 if (fb->func->init_unkn) 156 156 fb->func->init_unkn(fb); 157 + 158 + if (fb->func->vpr.scrub_required && 159 + fb->func->vpr.scrub_required(fb)) { 160 + nvkm_debug(subdev, "VPR locked, running scrubber binary\n"); 161 + 162 + ret = fb->func->vpr.scrub(fb); 163 + if (ret) 164 + return ret; 165 + 166 + if (fb->func->vpr.scrub_required(fb)) { 167 + nvkm_error(subdev, "VPR still locked after scrub!\n"); 168 + return -EIO; 169 + } 170 + 171 + nvkm_debug(subdev, "VPR scrubber binary successful\n"); 172 + } 173 + 157 174 return 0; 158 175 } 159 176 ··· 188 171 189 172 nvkm_mm_fini(&fb->tags); 190 173 nvkm_ram_del(&fb->ram); 174 + 175 + nvkm_blob_dtor(&fb->vpr_scrubber); 191 176 192 177 if (fb->func->dtor) 193 178 return fb->func->dtor(fb);
+95 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
··· 24 24 #include "gf100.h" 25 25 #include "ram.h" 26 26 27 + #include <core/firmware.h> 27 28 #include <core/memory.h> 29 + #include <nvfw/fw.h> 30 + #include <nvfw/hs.h> 31 + #include <engine/nvdec.h> 32 + 33 + int 34 + gp102_fb_vpr_scrub(struct nvkm_fb *fb) 35 + { 36 + struct nvkm_subdev *subdev = &fb->subdev; 37 + struct nvkm_device *device = subdev->device; 38 + struct nvkm_falcon *falcon = &device->nvdec[0]->falcon; 39 + struct nvkm_blob *blob = &fb->vpr_scrubber; 40 + const struct nvfw_bin_hdr *hsbin_hdr; 41 + const struct nvfw_hs_header *fw_hdr; 42 + const struct nvfw_hs_load_header *lhdr; 43 + void *scrub_data; 44 + u32 patch_loc, patch_sig; 45 + int ret; 46 + 47 + nvkm_falcon_get(falcon, subdev); 48 + 49 + hsbin_hdr = nvfw_bin_hdr(subdev, blob->data); 50 + fw_hdr = nvfw_hs_header(subdev, blob->data + hsbin_hdr->header_offset); 51 + lhdr = nvfw_hs_load_header(subdev, blob->data + fw_hdr->hdr_offset); 52 + scrub_data = blob->data + hsbin_hdr->data_offset; 53 + 54 + patch_loc = *(u32 *)(blob->data + fw_hdr->patch_loc); 55 + patch_sig = *(u32 *)(blob->data + fw_hdr->patch_sig); 56 + if (falcon->debug) { 57 + memcpy(scrub_data + patch_loc, 58 + blob->data + fw_hdr->sig_dbg_offset + patch_sig, 59 + fw_hdr->sig_dbg_size); 60 + } else { 61 + memcpy(scrub_data + patch_loc, 62 + blob->data + fw_hdr->sig_prod_offset + patch_sig, 63 + fw_hdr->sig_prod_size); 64 + } 65 + 66 + nvkm_falcon_reset(falcon); 67 + nvkm_falcon_bind_context(falcon, NULL); 68 + 69 + nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, 70 + lhdr->non_sec_code_size, 71 + lhdr->non_sec_code_off >> 8, 0, false); 72 + nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], 73 + ALIGN(lhdr->apps[0], 0x100), 74 + lhdr->apps[1], 75 + lhdr->apps[0] >> 8, 0, true); 76 + nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, 77 + lhdr->data_size, 0); 78 + 79 + nvkm_falcon_set_start_addr(falcon, 0x0); 80 + nvkm_falcon_start(falcon); 81 + 82 + ret = 
nvkm_falcon_wait_for_halt(falcon, 500); 83 + if (ret < 0) { 84 + ret = -ETIMEDOUT; 85 + goto end; 86 + } 87 + 88 + /* put nvdec in clean state - without reset it will remain in HS mode */ 89 + nvkm_falcon_reset(falcon); 90 + end: 91 + nvkm_falcon_put(falcon, subdev); 92 + return ret; 93 + } 94 + 95 + bool 96 + gp102_fb_vpr_scrub_required(struct nvkm_fb *fb) 97 + { 98 + struct nvkm_device *device = fb->subdev.device; 99 + nvkm_wr32(device, 0x100cd0, 0x2); 100 + return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0; 101 + } 28 102 29 103 static const struct nvkm_fb_func 30 104 gp102_fb = { ··· 107 33 .init = gp100_fb_init, 108 34 .init_remapper = gp100_fb_init_remapper, 109 35 .init_page = gm200_fb_init_page, 36 + .vpr.scrub_required = gp102_fb_vpr_scrub_required, 37 + .vpr.scrub = gp102_fb_vpr_scrub, 110 38 .ram_new = gp100_ram_new, 111 39 }; 112 40 113 41 int 42 + gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, 43 + int index, struct nvkm_fb **pfb) 44 + { 45 + int ret = gf100_fb_new_(func, device, index, pfb); 46 + if (ret) 47 + return ret; 48 + 49 + return nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0, 50 + &(*pfb)->vpr_scrubber); 51 + } 52 + 53 + int 114 54 gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) 115 55 { 116 - return gf100_fb_new_(&gp102_fb, device, index, pfb); 56 + return gp102_fb_new_(&gp102_fb, device, index, pfb); 117 57 } 58 + 59 + MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); 60 + MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); 61 + MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); 62 + MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); 63 + MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
··· 35 35 .init = gp100_fb_init, 36 36 .init_page = gv100_fb_init_page, 37 37 .init_unkn = gp100_fb_init_unkn, 38 + .vpr.scrub_required = gp102_fb_vpr_scrub_required, 39 + .vpr.scrub = gp102_fb_vpr_scrub, 38 40 .ram_new = gp100_ram_new, 39 41 .default_bigpage = 16, 40 42 }; ··· 44 42 int 45 43 gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) 46 44 { 47 - return gf100_fb_new_(&gv100_fb, device, index, pfb); 45 + return gp102_fb_new_(&gv100_fb, device, index, pfb); 48 46 } 47 + 48 + MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); 49 + MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin"); 50 + MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin"); 51 + MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+10
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
··· 17 17 void (*intr)(struct nvkm_fb *); 18 18 19 19 struct { 20 + bool (*scrub_required)(struct nvkm_fb *); 21 + int (*scrub)(struct nvkm_fb *); 22 + } vpr; 23 + 24 + struct { 20 25 int regions; 21 26 void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size, 22 27 u32 pitch, u32 flags, struct nvkm_fb_tile *); ··· 77 72 78 73 void gp100_fb_init_remapper(struct nvkm_fb *); 79 74 void gp100_fb_init_unkn(struct nvkm_fb *); 75 + 76 + int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int, 77 + struct nvkm_fb **); 78 + bool gp102_fb_vpr_scrub_required(struct nvkm_fb *); 79 + int gp102_fb_vpr_scrub(struct nvkm_fb *); 80 80 #endif
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
··· 655 655 656 656 static const struct nvkm_ram_func 657 657 gf100_ram = { 658 - .upper = 0x0200000000, 658 + .upper = 0x0200000000ULL, 659 659 .probe_fbp = gf100_ram_probe_fbp, 660 660 .probe_fbp_amount = gf100_ram_probe_fbp_amount, 661 661 .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
··· 43 43 44 44 static const struct nvkm_ram_func 45 45 gf108_ram = { 46 - .upper = 0x0200000000, 46 + .upper = 0x0200000000ULL, 47 47 .probe_fbp = gf100_ram_probe_fbp, 48 48 .probe_fbp_amount = gf108_ram_probe_fbp_amount, 49 49 .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
··· 1698 1698 1699 1699 static const struct nvkm_ram_func 1700 1700 gk104_ram = { 1701 - .upper = 0x0200000000, 1701 + .upper = 0x0200000000ULL, 1702 1702 .probe_fbp = gf100_ram_probe_fbp, 1703 1703 .probe_fbp_amount = gf108_ram_probe_fbp_amount, 1704 1704 .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
··· 33 33 34 34 static const struct nvkm_ram_func 35 35 gm107_ram = { 36 - .upper = 0x1000000000, 36 + .upper = 0x1000000000ULL, 37 37 .probe_fbp = gm107_ram_probe_fbp, 38 38 .probe_fbp_amount = gf108_ram_probe_fbp_amount, 39 39 .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
··· 48 48 49 49 static const struct nvkm_ram_func 50 50 gm200_ram = { 51 - .upper = 0x1000000000, 51 + .upper = 0x1000000000ULL, 52 52 .probe_fbp = gm107_ram_probe_fbp, 53 53 .probe_fbp_amount = gm200_ram_probe_fbp_amount, 54 54 .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
··· 79 79 80 80 static const struct nvkm_ram_func 81 81 gp100_ram = { 82 - .upper = 0x1000000000, 82 + .upper = 0x1000000000ULL, 83 83 .probe_fbp = gm107_ram_probe_fbp, 84 84 .probe_fbp_amount = gm200_ram_probe_fbp_amount, 85 85 .probe_fbpa_amount = gp100_ram_probe_fbpa,
+1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
··· 1 1 # SPDX-License-Identifier: MIT 2 + nvkm-y += nvkm/subdev/gsp/base.o 2 3 nvkm-y += nvkm/subdev/gsp/gv100.o
+59
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
··· 1 + /* 2 + * Copyright 2019 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + #include <core/falcon.h> 24 + #include <core/firmware.h> 25 + #include <subdev/acr.h> 26 + #include <subdev/top.h> 27 + 28 + static void * 29 + nvkm_gsp_dtor(struct nvkm_subdev *subdev) 30 + { 31 + struct nvkm_gsp *gsp = nvkm_gsp(subdev); 32 + nvkm_falcon_dtor(&gsp->falcon); 33 + return gsp; 34 + } 35 + 36 + static const struct nvkm_subdev_func 37 + nvkm_gsp = { 38 + .dtor = nvkm_gsp_dtor, 39 + }; 40 + 41 + int 42 + nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, 43 + int index, struct nvkm_gsp **pgsp) 44 + { 45 + struct nvkm_gsp *gsp; 46 + 47 + if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) 48 + return -ENOMEM; 49 + 50 + nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev); 51 + 52 + fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp); 53 + if (IS_ERR(fwif)) 54 + return PTR_ERR(fwif); 55 + 56 + return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, 57 + nvkm_subdev_name[gsp->subdev.index], 0, 58 + &gsp->falcon); 59 + }
+23 -30
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
··· 19 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 20 * OTHER DEALINGS IN THE SOFTWARE. 21 21 */ 22 - #include <subdev/gsp.h> 23 - #include <subdev/top.h> 24 - #include <engine/falcon.h> 22 + #include "priv.h" 23 + 24 + static const struct nvkm_falcon_func 25 + gv100_gsp_flcn = { 26 + .fbif = 0x600, 27 + .load_imem = nvkm_falcon_v1_load_imem, 28 + .load_dmem = nvkm_falcon_v1_load_dmem, 29 + .read_dmem = nvkm_falcon_v1_read_dmem, 30 + .bind_context = gp102_sec2_flcn_bind_context, 31 + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 32 + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 33 + .set_start_addr = nvkm_falcon_v1_set_start_addr, 34 + .start = nvkm_falcon_v1_start, 35 + .enable = gp102_sec2_flcn_enable, 36 + .disable = nvkm_falcon_v1_disable, 37 + }; 25 38 26 39 static int 27 - gv100_gsp_oneinit(struct nvkm_subdev *subdev) 40 + gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) 28 41 { 29 - struct nvkm_gsp *gsp = nvkm_gsp(subdev); 30 - 31 - gsp->addr = nvkm_top_addr(subdev->device, subdev->index); 32 - if (!gsp->addr) 33 - return -EINVAL; 34 - 35 - return nvkm_falcon_v1_new(subdev, "GSP", gsp->addr, &gsp->falcon); 42 + return 0; 36 43 } 37 44 38 - static void * 39 - gv100_gsp_dtor(struct nvkm_subdev *subdev) 40 - { 41 - struct nvkm_gsp *gsp = nvkm_gsp(subdev); 42 - nvkm_falcon_del(&gsp->falcon); 43 - return gsp; 44 - } 45 - 46 - static const struct nvkm_subdev_func 47 - gv100_gsp = { 48 - .dtor = gv100_gsp_dtor, 49 - .oneinit = gv100_gsp_oneinit, 45 + struct nvkm_gsp_fwif 46 + gv100_gsp[] = { 47 + { -1, gv100_gsp_nofw, &gv100_gsp_flcn }, 48 + {} 50 49 }; 51 50 52 51 int 53 52 gv100_gsp_new(struct nvkm_device *device, int index, struct nvkm_gsp **pgsp) 54 53 { 55 - struct nvkm_gsp *gsp; 56 - 57 - if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) 58 - return -ENOMEM; 59 - 60 - nvkm_subdev_ctor(&gv100_gsp, device, index, &gsp->subdev); 61 - return 0; 54 + return nvkm_gsp_new_(gv100_gsp, 
device, index, pgsp); 62 55 }
+15
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVKM_GSP_PRIV_H__ 3 + #define __NVKM_GSP_PRIV_H__ 4 + #include <subdev/gsp.h> 5 + enum nvkm_acr_lsf_id; 6 + 7 + struct nvkm_gsp_fwif { 8 + int version; 9 + int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *); 10 + const struct nvkm_falcon_func *flcn; 11 + }; 12 + 13 + int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int, 14 + struct nvkm_gsp **); 15 + #endif
+1
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
··· 6 6 nvkm-y += nvkm/subdev/ltc/gm200.o 7 7 nvkm-y += nvkm/subdev/ltc/gp100.o 8 8 nvkm-y += nvkm/subdev/ltc/gp102.o 9 + nvkm-y += nvkm/subdev/ltc/gp10b.o
+65
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
··· 1 + /* 2 + * Copyright (c) 2019 NVIDIA Corporation. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: Thierry Reding 23 + */ 24 + 25 + #include "priv.h" 26 + 27 + static void 28 + gp10b_ltc_init(struct nvkm_ltc *ltc) 29 + { 30 + struct nvkm_device *device = ltc->subdev.device; 31 + struct iommu_fwspec *spec; 32 + 33 + nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); 34 + nvkm_wr32(device, 0x17e000, ltc->ltc_nr); 35 + nvkm_wr32(device, 0x100800, ltc->ltc_nr); 36 + 37 + spec = dev_iommu_fwspec_get(device->dev); 38 + if (spec) { 39 + u32 sid = spec->ids[0] & 0xffff; 40 + 41 + /* stream ID */ 42 + nvkm_wr32(device, 0x160000, sid << 2); 43 + } 44 + } 45 + 46 + static const struct nvkm_ltc_func 47 + gp10b_ltc = { 48 + .oneinit = gp100_ltc_oneinit, 49 + .init = gp10b_ltc_init, 50 + .intr = gp100_ltc_intr, 51 + .cbc_clear = gm107_ltc_cbc_clear, 52 + .cbc_wait = gm107_ltc_cbc_wait, 53 + .zbc = 16, 54 + .zbc_clear_color = gm107_ltc_zbc_clear_color, 55 + .zbc_clear_depth = gm107_ltc_zbc_clear_depth, 56 + .zbc_clear_stencil = gp102_ltc_zbc_clear_stencil, 57 + .invalidate = gf100_ltc_invalidate, 58 + .flush = gf100_ltc_flush, 59 + }; 60 + 61 + int 62 + gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) 63 + { 64 + return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc); 65 + }
+2
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
··· 46 46 int gp100_ltc_oneinit(struct nvkm_ltc *); 47 47 void gp100_ltc_init(struct nvkm_ltc *); 48 48 void gp100_ltc_intr(struct nvkm_ltc *); 49 + 50 + void gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *, int, const u32); 49 51 #endif
+2 -1
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
··· 30 30 * The value 0xff represents an invalid storage type. 31 31 */ 32 32 const u8 * 33 - gf100_mmu_kind(struct nvkm_mmu *mmu, int *count) 33 + gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) 34 34 { 35 35 static const u8 36 36 kind[256] = { ··· 69 69 }; 70 70 71 71 *count = ARRAY_SIZE(kind); 72 + *invalid = 0xff; 72 73 return kind; 73 74 } 74 75
+2 -1
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
··· 27 27 #include <nvif/class.h> 28 28 29 29 const u8 * 30 - gm200_mmu_kind(struct nvkm_mmu *mmu, int *count) 30 + gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) 31 31 { 32 32 static const u8 33 33 kind[256] = { ··· 65 65 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff 66 66 }; 67 67 *count = ARRAY_SIZE(kind); 68 + *invalid = 0xff; 68 69 return kind; 69 70 } 70 71
+2 -1
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
··· 27 27 #include <nvif/class.h> 28 28 29 29 const u8 * 30 - nv50_mmu_kind(struct nvkm_mmu *base, int *count) 30 + nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid) 31 31 { 32 32 /* 0x01: no bank swizzle 33 33 * 0x02: bank swizzled ··· 57 57 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f 58 58 }; 59 59 *count = ARRAY_SIZE(kind); 60 + *invalid = 0x7f; 60 61 return kind; 61 62 } 62 63
+4 -4
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
··· 35 35 u32 pd_offset; 36 36 } vmm; 37 37 38 - const u8 *(*kind)(struct nvkm_mmu *, int *count); 38 + const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid); 39 39 bool kind_sys; 40 40 }; 41 41 42 42 extern const struct nvkm_mmu_func nv04_mmu; 43 43 44 - const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count); 44 + const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); 45 45 46 - const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count); 46 + const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid); 47 47 48 - const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *); 48 + const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *); 49 49 50 50 struct nvkm_mmu_pt { 51 51 union {
+15 -1
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
··· 1 1 /* 2 2 * Copyright 2018 Red Hat Inc. 3 + * Copyright 2019 NVIDIA Corporation. 3 4 * 4 5 * Permission is hereby granted, free of charge, to any person obtaining a 5 6 * copy of this software and associated documentation files (the "Software"), ··· 27 26 28 27 #include <nvif/class.h> 29 28 29 + const u8 * 30 + tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid) 31 + { 32 + static const u8 33 + kind[16] = { 34 + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */ 35 + 0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07, 36 + }; 37 + *count = ARRAY_SIZE(kind); 38 + *invalid = 0x07; 39 + return kind; 40 + } 41 + 30 42 static const struct nvkm_mmu_func 31 43 tu102_mmu = { 32 44 .dma_bits = 47, 33 45 .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}}, 34 46 .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map }, 35 47 .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new }, 36 - .kind = gm200_mmu_kind, 48 + .kind = tu102_mmu_kind, 37 49 .kind_sys = true, 38 50 }; 39 51
+5 -2
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
··· 111 111 } *args = argv; 112 112 const u8 *kind = NULL; 113 113 int ret = -ENOSYS, count = 0; 114 + u8 kind_inv = 0; 114 115 115 116 if (mmu->func->kind) 116 - kind = mmu->func->kind(mmu, &count); 117 + kind = mmu->func->kind(mmu, &count, &kind_inv); 117 118 118 119 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) { 119 120 if (argc != args->v0.count * sizeof(*args->v0.data)) 120 121 return -EINVAL; 121 122 if (args->v0.count > count) 122 123 return -EINVAL; 124 + args->v0.kind_inv = kind_inv; 123 125 memcpy(args->v0.data, kind, args->v0.count); 124 126 } else 125 127 return ret; ··· 159 157 struct nvkm_mmu *mmu = device->mmu; 160 158 struct nvkm_ummu *ummu; 161 159 int ret = -ENOSYS, kinds = 0; 160 + u8 unused = 0; 162 161 163 162 if (mmu->func->kind) 164 - mmu->func->kind(mmu, &kinds); 163 + mmu->func->kind(mmu, &kinds, &unused); 165 164 166 165 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { 167 166 args->v0.dmabits = mmu->dma_bits;
+3 -3
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
··· 247 247 } *args = argv; 248 248 struct nvkm_device *device = vmm->mmu->subdev.device; 249 249 struct nvkm_memory *memory = map->memory; 250 - u8 kind, priv, ro, vol; 250 + u8 kind, kind_inv, priv, ro, vol; 251 251 int kindn, aper, ret = -ENOSYS; 252 252 const u8 *kindm; 253 253 ··· 274 274 if (WARN_ON(aper < 0)) 275 275 return aper; 276 276 277 - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); 278 - if (kind >= kindn || kindm[kind] == 0xff) { 277 + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); 278 + if (kind >= kindn || kindm[kind] == kind_inv) { 279 279 VMM_DEBUG(vmm, "kind %02x", kind); 280 280 return -EINVAL; 281 281 }
+3 -3
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
··· 320 320 } *args = argv; 321 321 struct nvkm_device *device = vmm->mmu->subdev.device; 322 322 struct nvkm_memory *memory = map->memory; 323 - u8 kind, priv, ro, vol; 323 + u8 kind, kind_inv, priv, ro, vol; 324 324 int kindn, aper, ret = -ENOSYS; 325 325 const u8 *kindm; 326 326 ··· 347 347 if (WARN_ON(aper < 0)) 348 348 return aper; 349 349 350 - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); 351 - if (kind >= kindn || kindm[kind] == 0xff) { 350 + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); 351 + if (kind >= kindn || kindm[kind] == kind_inv) { 352 352 VMM_DEBUG(vmm, "kind %02x", kind); 353 353 return -EINVAL; 354 354 }
+3 -3
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
··· 235 235 struct nvkm_device *device = vmm->mmu->subdev.device; 236 236 struct nvkm_ram *ram = device->fb->ram; 237 237 struct nvkm_memory *memory = map->memory; 238 - u8 aper, kind, comp, priv, ro; 238 + u8 aper, kind, kind_inv, comp, priv, ro; 239 239 int kindn, ret = -ENOSYS; 240 240 const u8 *kindm; 241 241 ··· 278 278 return -EINVAL; 279 279 } 280 280 281 - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); 282 - if (kind >= kindn || kindm[kind] == 0x7f) { 281 + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); 282 + if (kind >= kindn || kindm[kind] == kind_inv) { 283 283 VMM_DEBUG(vmm, "kind %02x", kind); 284 284 return -EINVAL; 285 285 }
+1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
··· 12 12 nvkm-y += nvkm/subdev/pmu/gm20b.o 13 13 nvkm-y += nvkm/subdev/pmu/gp100.o 14 14 nvkm-y += nvkm/subdev/pmu/gp102.o 15 + nvkm-y += nvkm/subdev/pmu/gp10b.o
+38 -15
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
··· 23 23 */ 24 24 #include "priv.h" 25 25 26 - #include <core/msgqueue.h> 26 + #include <core/firmware.h> 27 27 #include <subdev/timer.h> 28 28 29 29 bool ··· 85 85 pmu->func->fini(pmu); 86 86 87 87 flush_work(&pmu->recv.work); 88 + 89 + reinit_completion(&pmu->wpr_ready); 90 + 91 + nvkm_falcon_cmdq_fini(pmu->lpq); 92 + nvkm_falcon_cmdq_fini(pmu->hpq); 93 + pmu->initmsg_received = false; 88 94 return 0; 89 95 } 90 96 ··· 139 133 return ret; 140 134 } 141 135 142 - static int 143 - nvkm_pmu_oneinit(struct nvkm_subdev *subdev) 144 - { 145 - struct nvkm_pmu *pmu = nvkm_pmu(subdev); 146 - return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon); 147 - } 148 - 149 136 static void * 150 137 nvkm_pmu_dtor(struct nvkm_subdev *subdev) 151 138 { 152 139 struct nvkm_pmu *pmu = nvkm_pmu(subdev); 153 - nvkm_msgqueue_del(&pmu->queue); 154 - nvkm_falcon_del(&pmu->falcon); 140 + nvkm_falcon_msgq_del(&pmu->msgq); 141 + nvkm_falcon_cmdq_del(&pmu->lpq); 142 + nvkm_falcon_cmdq_del(&pmu->hpq); 143 + nvkm_falcon_qmgr_del(&pmu->qmgr); 144 + nvkm_falcon_dtor(&pmu->falcon); 155 145 return nvkm_pmu(subdev); 156 146 } 157 147 ··· 155 153 nvkm_pmu = { 156 154 .dtor = nvkm_pmu_dtor, 157 155 .preinit = nvkm_pmu_preinit, 158 - .oneinit = nvkm_pmu_oneinit, 159 156 .init = nvkm_pmu_init, 160 157 .fini = nvkm_pmu_fini, 161 158 .intr = nvkm_pmu_intr, 162 159 }; 163 160 164 161 int 165 - nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device, 162 + nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, 166 163 int index, struct nvkm_pmu *pmu) 167 164 { 165 + int ret; 166 + 168 167 nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); 169 - pmu->func = func; 168 + 170 169 INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); 171 170 init_waitqueue_head(&pmu->recv.wait); 171 + 172 + fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu); 173 + if (IS_ERR(fwif)) 174 + return PTR_ERR(fwif); 175 + 176 + pmu->func = fwif->func; 177 + 178 + ret = 
nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, 179 + nvkm_subdev_name[pmu->subdev.index], 0x10a000, 180 + &pmu->falcon); 181 + if (ret) 182 + return ret; 183 + 184 + if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) || 185 + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) || 186 + (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) || 187 + (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq))) 188 + return ret; 189 + 190 + init_completion(&pmu->wpr_ready); 172 191 return 0; 173 192 } 174 193 175 194 int 176 - nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device, 195 + nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, 177 196 int index, struct nvkm_pmu **ppmu) 178 197 { 179 198 struct nvkm_pmu *pmu; 180 199 if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) 181 200 return -ENOMEM; 182 - return nvkm_pmu_ctor(func, device, index, *ppmu); 201 + return nvkm_pmu_ctor(fwif, device, index, *ppmu); 183 202 }
+14 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
··· 42 42 43 43 static const struct nvkm_pmu_func 44 44 gf100_pmu = { 45 + .flcn = &gt215_pmu_flcn, 45 46 .code.data = gf100_pmu_code, 46 47 .code.size = sizeof(gf100_pmu_code), 47 48 .data.data = gf100_pmu_data, ··· 57 56 }; 58 57 59 58 int 59 + gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) 60 + { 61 + return 0; 62 + } 63 + 64 + static const struct nvkm_pmu_fwif 65 + gf100_pmu_fwif[] = { 66 + { -1, gf100_pmu_nofw, &gf100_pmu }, 67 + {} 68 + }; 69 + 70 + int 60 71 gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 61 72 { 62 - return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu); 73 + return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu); 63 74 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
··· 26 26 27 27 static const struct nvkm_pmu_func 28 28 gf119_pmu = { 29 + .flcn = &gt215_pmu_flcn, 29 30 .code.data = gf119_pmu_code, 30 31 .code.size = sizeof(gf119_pmu_code), 31 32 .data.data = gf119_pmu_data, ··· 40 39 .recv = gt215_pmu_recv, 41 40 }; 42 41 42 + static const struct nvkm_pmu_fwif 43 + gf119_pmu_fwif[] = { 44 + { -1, gf100_pmu_nofw, &gf119_pmu }, 45 + {} 46 + }; 47 + 43 48 int 44 49 gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 45 50 { 46 - return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu); 51 + return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu); 47 52 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
··· 105 105 106 106 static const struct nvkm_pmu_func 107 107 gk104_pmu = { 108 + .flcn = &gt215_pmu_flcn, 108 109 .code.data = gk104_pmu_code, 109 110 .code.size = sizeof(gk104_pmu_code), 110 111 .data.data = gk104_pmu_data, ··· 120 119 .pgob = gk104_pmu_pgob, 121 120 }; 122 121 122 + static const struct nvkm_pmu_fwif 123 + gk104_pmu_fwif[] = { 124 + { -1, gf100_pmu_nofw, &gk104_pmu }, 125 + {} 126 + }; 127 + 123 128 int 124 129 gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 125 130 { 126 - return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu); 131 + return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu); 127 132 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
··· 84 84 85 85 static const struct nvkm_pmu_func 86 86 gk110_pmu = { 87 + .flcn = &gt215_pmu_flcn, 87 88 .code.data = gk110_pmu_code, 88 89 .code.size = sizeof(gk110_pmu_code), 89 90 .data.data = gk110_pmu_data, ··· 99 98 .pgob = gk110_pmu_pgob, 100 99 }; 101 100 101 + static const struct nvkm_pmu_fwif 102 + gk110_pmu_fwif[] = { 103 + { -1, gf100_pmu_nofw, &gk110_pmu }, 104 + {} 105 + }; 106 + 102 107 int 103 108 gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 104 109 { 105 - return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu); 110 + return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu); 106 111 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
··· 26 26 27 27 static const struct nvkm_pmu_func 28 28 gk208_pmu = { 29 + .flcn = &gt215_pmu_flcn, 29 30 .code.data = gk208_pmu_code, 30 31 .code.size = sizeof(gk208_pmu_code), 31 32 .data.data = gk208_pmu_data, ··· 41 40 .pgob = gk110_pmu_pgob, 42 41 }; 43 42 43 + static const struct nvkm_pmu_fwif 44 + gk208_pmu_fwif[] = { 45 + { -1, gf100_pmu_nofw, &gk208_pmu }, 46 + {} 47 + }; 48 + 44 49 int 45 50 gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 46 51 { 47 - return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu); 52 + return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu); 48 53 }
+15 -6
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
··· 95 95 gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu, 96 96 struct gk20a_pmu_dvfs_dev_status *status) 97 97 { 98 - struct nvkm_falcon *falcon = pmu->base.falcon; 98 + struct nvkm_falcon *falcon = &pmu->base.falcon; 99 99 100 100 status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10)); 101 101 status->total= nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10)); ··· 104 104 static void 105 105 gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu) 106 106 { 107 - struct nvkm_falcon *falcon = pmu->base.falcon; 107 + struct nvkm_falcon *falcon = &pmu->base.falcon; 108 108 109 109 nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000); 110 110 nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000); ··· 160 160 struct gk20a_pmu *gpmu = gk20a_pmu(pmu); 161 161 nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm); 162 162 163 - nvkm_falcon_put(pmu->falcon, &pmu->subdev); 163 + nvkm_falcon_put(&pmu->falcon, &pmu->subdev); 164 164 } 165 165 166 166 static int ··· 169 169 struct gk20a_pmu *gpmu = gk20a_pmu(pmu); 170 170 struct nvkm_subdev *subdev = &pmu->subdev; 171 171 struct nvkm_device *device = pmu->subdev.device; 172 - struct nvkm_falcon *falcon = pmu->falcon; 172 + struct nvkm_falcon *falcon = &pmu->falcon; 173 173 int ret; 174 174 175 175 ret = nvkm_falcon_get(falcon, subdev); ··· 196 196 197 197 static const struct nvkm_pmu_func 198 198 gk20a_pmu = { 199 + .flcn = &gt215_pmu_flcn, 199 200 .enabled = gf100_pmu_enabled, 200 201 .init = gk20a_pmu_init, 201 202 .fini = gk20a_pmu_fini, 202 203 .reset = gf100_pmu_reset, 203 204 }; 204 205 206 + static const struct nvkm_pmu_fwif 207 + gk20a_pmu_fwif[] = { 208 + { -1, gf100_pmu_nofw, &gk20a_pmu }, 209 + {} 210 + }; 211 + 205 212 int 206 213 gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 207 214 { 208 215 struct gk20a_pmu *pmu; 216 + int ret; 209 217 210 218 if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) 211 219 return -ENOMEM; 212 220 *ppmu = 
&pmu->base; 213 221 214 - nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base); 222 + ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base); 223 + if (ret) 224 + return ret; 215 225 216 226 pmu->data = &gk20a_dvfs_data; 217 227 nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work); 218 - 219 228 return 0; 220 229 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
··· 28 28 29 29 static const struct nvkm_pmu_func 30 30 gm107_pmu = { 31 + .flcn = &gt215_pmu_flcn, 31 32 .code.data = gm107_pmu_code, 32 33 .code.size = sizeof(gm107_pmu_code), 33 34 .data.data = gm107_pmu_data, ··· 42 41 .recv = gt215_pmu_recv, 43 42 }; 44 43 44 + static const struct nvkm_pmu_fwif 45 + gm107_pmu_fwif[] = { 46 + { -1, gf100_pmu_nofw, &gm107_pmu }, 47 + {} 48 + }; 49 + 45 50 int 46 51 gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 47 52 { 48 - return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu); 53 + return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu); 49 54 }
+198 -17
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
··· 19 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 20 * DEALINGS IN THE SOFTWARE. 21 21 */ 22 - 23 - #include <engine/falcon.h> 24 - #include <core/msgqueue.h> 25 22 #include "priv.h" 26 23 27 - static void 28 - gm20b_pmu_recv(struct nvkm_pmu *pmu) 24 + #include <core/memory.h> 25 + #include <subdev/acr.h> 26 + 27 + #include <nvfw/flcn.h> 28 + #include <nvfw/pmu.h> 29 + 30 + static int 31 + gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr) 29 32 { 30 - if (!pmu->queue) { 31 - nvkm_warn(&pmu->subdev, 32 - "recv function called while no firmware set!\n"); 33 - return; 33 + struct nv_pmu_acr_bootstrap_falcon_msg *msg = 34 + container_of(hdr, typeof(*msg), msg.hdr); 35 + return msg->falcon_id; 36 + } 37 + 38 + int 39 + gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon, 40 + enum nvkm_acr_lsf_id id) 41 + { 42 + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); 43 + struct nv_pmu_acr_bootstrap_falcon_cmd cmd = { 44 + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, 45 + .cmd.hdr.size = sizeof(cmd), 46 + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON, 47 + .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES, 48 + .falcon_id = id, 49 + }; 50 + int ret; 51 + 52 + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, 53 + gm20b_pmu_acr_bootstrap_falcon_cb, 54 + &pmu->subdev, msecs_to_jiffies(1000)); 55 + if (ret >= 0 && ret != cmd.falcon_id) 56 + ret = -EIO; 57 + return ret; 58 + } 59 + 60 + int 61 + gm20b_pmu_acr_boot(struct nvkm_falcon *falcon) 62 + { 63 + struct nv_pmu_args args = { .secure_mode = true }; 64 + const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args); 65 + nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0); 66 + nvkm_falcon_start(falcon); 67 + return 0; 68 + } 69 + 70 + void 71 + gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust) 72 + { 73 + struct loader_config hdr; 74 + u64 addr; 75 + 76 + nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr)); 77 + addr = 
((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8); 78 + hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8); 79 + hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8); 80 + addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8); 81 + hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8); 82 + hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8); 83 + addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8); 84 + hdr.overlay_dma_base = lower_32_bits((addr + adjust) << 8); 85 + hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8); 86 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 87 + 88 + loader_config_dump(&acr->subdev, &hdr); 89 + } 90 + 91 + void 92 + gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld, 93 + struct nvkm_acr_lsfw *lsfw) 94 + { 95 + const u64 base = lsfw->offset.img + lsfw->app_start_offset; 96 + const u64 code = (base + lsfw->app_resident_code_offset) >> 8; 97 + const u64 data = (base + lsfw->app_resident_data_offset) >> 8; 98 + const struct loader_config hdr = { 99 + .dma_idx = FALCON_DMAIDX_UCODE, 100 + .code_dma_base = lower_32_bits(code), 101 + .code_size_total = lsfw->app_size, 102 + .code_size_to_load = lsfw->app_resident_code_size, 103 + .code_entry_point = lsfw->app_imem_entry, 104 + .data_dma_base = lower_32_bits(data), 105 + .data_size = lsfw->app_resident_data_size, 106 + .overlay_dma_base = lower_32_bits(code), 107 + .argc = 1, 108 + .argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args), 109 + .code_dma_base1 = upper_32_bits(code), 110 + .data_dma_base1 = upper_32_bits(data), 111 + .overlay_dma_base1 = upper_32_bits(code), 112 + }; 113 + 114 + nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr)); 115 + } 116 + 117 + static const struct nvkm_acr_lsf_func 118 + gm20b_pmu_acr = { 119 + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, 120 + .bld_size = sizeof(struct loader_config), 121 + .bld_write = gm20b_pmu_acr_bld_write, 122 + .bld_patch = gm20b_pmu_acr_bld_patch, 123 + .boot = 
gm20b_pmu_acr_boot, 124 + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, 125 + }; 126 + 127 + static int 128 + gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr) 129 + { 130 + struct nv_pmu_acr_init_wpr_region_msg *msg = 131 + container_of(hdr, typeof(*msg), msg.hdr); 132 + struct nvkm_pmu *pmu = priv; 133 + struct nvkm_subdev *subdev = &pmu->subdev; 134 + 135 + if (msg->error_code) { 136 + nvkm_error(subdev, "ACR WPR init failure: %d\n", 137 + msg->error_code); 138 + return -EINVAL; 34 139 } 35 140 36 - nvkm_msgqueue_recv(pmu->queue); 141 + nvkm_debug(subdev, "ACR WPR init complete\n"); 142 + complete_all(&pmu->wpr_ready); 143 + return 0; 144 + } 145 + 146 + static int 147 + gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu) 148 + { 149 + struct nv_pmu_acr_init_wpr_region_cmd cmd = { 150 + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, 151 + .cmd.hdr.size = sizeof(cmd), 152 + .cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION, 153 + .region_id = 1, 154 + .wpr_offset = 0, 155 + }; 156 + 157 + return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, 158 + gm20b_pmu_acr_init_wpr_callback, pmu, 0); 159 + } 160 + 161 + int 162 + gm20b_pmu_initmsg(struct nvkm_pmu *pmu) 163 + { 164 + struct nv_pmu_init_msg msg; 165 + int ret; 166 + 167 + ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg)); 168 + if (ret) 169 + return ret; 170 + 171 + if (msg.hdr.unit_id != NV_PMU_UNIT_INIT || 172 + msg.msg_type != NV_PMU_INIT_MSG_INIT) 173 + return -EINVAL; 174 + 175 + nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index, 176 + msg.queue_info[0].offset, 177 + msg.queue_info[0].size); 178 + nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index, 179 + msg.queue_info[1].offset, 180 + msg.queue_info[1].size); 181 + nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index, 182 + msg.queue_info[4].offset, 183 + msg.queue_info[4].size); 184 + return gm20b_pmu_acr_init_wpr(pmu); 185 + } 186 + 187 + void 188 + gm20b_pmu_recv(struct nvkm_pmu *pmu) 189 + { 190 + if 
(!pmu->initmsg_received) { 191 + int ret = pmu->func->initmsg(pmu); 192 + if (ret) { 193 + nvkm_error(&pmu->subdev, 194 + "error parsing init message: %d\n", ret); 195 + return; 196 + } 197 + 198 + pmu->initmsg_received = true; 199 + } 200 + 201 + nvkm_falcon_msgq_recv(pmu->msgq); 37 202 } 38 203 39 204 static const struct nvkm_pmu_func 40 205 gm20b_pmu = { 206 + .flcn = &gt215_pmu_flcn, 41 207 .enabled = gf100_pmu_enabled, 42 208 .intr = gt215_pmu_intr, 43 209 .recv = gm20b_pmu_recv, 210 + .initmsg = gm20b_pmu_initmsg, 211 + }; 212 + 213 + #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 214 + MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); 215 + MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); 216 + MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); 217 + #endif 218 + 219 + int 220 + gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif) 221 + { 222 + return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon, 223 + NVKM_ACR_LSF_PMU, "pmu/", 224 + ver, fwif->acr); 225 + } 226 + 227 + static const struct nvkm_pmu_fwif 228 + gm20b_pmu_fwif[] = { 229 + { 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr }, 230 + {} 44 231 }; 45 232 46 233 int 47 234 gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 48 235 { 49 - int ret; 50 - 51 - ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu); 52 - if (ret) 53 - return ret; 54 - 55 - return 0; 236 + return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu); 56 237 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
··· 25 25 26 26 static const struct nvkm_pmu_func 27 27 gp100_pmu = { 28 + .flcn = &gt215_pmu_flcn, 28 29 .enabled = gf100_pmu_enabled, 29 30 .reset = gf100_pmu_reset, 31 + }; 32 + 33 + static const struct nvkm_pmu_fwif 34 + gp100_pmu_fwif[] = { 35 + { -1, gf100_pmu_nofw, &gp100_pmu }, 36 + {} 30 37 }; 31 38 32 39 int 33 40 gp100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 34 41 { 35 - return nvkm_pmu_new_(&gp100_pmu, device, index, ppmu); 42 + return nvkm_pmu_new_(gp100_pmu_fwif, device, index, ppmu); 36 43 }
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
··· 39 39 40 40 static const struct nvkm_pmu_func 41 41 gp102_pmu = { 42 + .flcn = &gt215_pmu_flcn, 42 43 .enabled = gp102_pmu_enabled, 43 44 .reset = gp102_pmu_reset, 45 + }; 46 + 47 + static const struct nvkm_pmu_fwif 48 + gp102_pmu_fwif[] = { 49 + { -1, gf100_pmu_nofw, &gp102_pmu }, 50 + {} 44 51 }; 45 52 46 53 int 47 54 gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 48 55 { 49 - return nvkm_pmu_new_(&gp102_pmu, device, index, ppmu); 56 + return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu); 50 57 }
+96
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
··· 1 + /* 2 + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 + * DEALINGS IN THE SOFTWARE. 
21 + */ 22 + #include "priv.h" 23 + 24 + #include <subdev/acr.h> 25 + 26 + #include <nvfw/flcn.h> 27 + #include <nvfw/pmu.h> 28 + 29 + static int 30 + gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv, 31 + struct nv_falcon_msg *hdr) 32 + { 33 + struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg = 34 + container_of(hdr, typeof(*msg), msg.hdr); 35 + return msg->falcon_mask; 36 + } 37 + static int 38 + gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask) 39 + { 40 + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); 41 + struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = { 42 + .cmd.hdr.unit_id = NV_PMU_UNIT_ACR, 43 + .cmd.hdr.size = sizeof(cmd), 44 + .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS, 45 + .flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES, 46 + .falcon_mask = mask, 47 + .wpr_lo = 0, /*XXX*/ 48 + .wpr_hi = 0, /*XXX*/ 49 + }; 50 + int ret; 51 + 52 + ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr, 53 + gp10b_pmu_acr_bootstrap_multiple_falcons_cb, 54 + &pmu->subdev, msecs_to_jiffies(1000)); 55 + if (ret >= 0 && ret != cmd.falcon_mask) 56 + ret = -EIO; 57 + return ret; 58 + } 59 + 60 + static const struct nvkm_acr_lsf_func 61 + gp10b_pmu_acr = { 62 + .flags = NVKM_ACR_LSF_DMACTL_REQ_CTX, 63 + .bld_size = sizeof(struct loader_config), 64 + .bld_write = gm20b_pmu_acr_bld_write, 65 + .bld_patch = gm20b_pmu_acr_bld_patch, 66 + .boot = gm20b_pmu_acr_boot, 67 + .bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon, 68 + .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons, 69 + }; 70 + 71 + static const struct nvkm_pmu_func 72 + gp10b_pmu = { 73 + .flcn = &gt215_pmu_flcn, 74 + .enabled = gf100_pmu_enabled, 75 + .intr = gt215_pmu_intr, 76 + .recv = gm20b_pmu_recv, 77 + .initmsg = gm20b_pmu_initmsg, 78 + }; 79 + 80 + #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 81 + MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); 82 + MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); 83 + 
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); 84 + #endif 85 + 86 + static const struct nvkm_pmu_fwif 87 + gp10b_pmu_fwif[] = { 88 + { 0, gm20b_pmu_load, &gp10b_pmu, &gp10b_pmu_acr }, 89 + {} 90 + }; 91 + 92 + int 93 + gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 94 + { 95 + return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu); 96 + }
+26 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
··· 241 241 return 0; 242 242 } 243 243 244 + const struct nvkm_falcon_func 245 + gt215_pmu_flcn = { 246 + .debug = 0xc08, 247 + .fbif = 0xe00, 248 + .load_imem = nvkm_falcon_v1_load_imem, 249 + .load_dmem = nvkm_falcon_v1_load_dmem, 250 + .read_dmem = nvkm_falcon_v1_read_dmem, 251 + .bind_context = nvkm_falcon_v1_bind_context, 252 + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 253 + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 254 + .set_start_addr = nvkm_falcon_v1_set_start_addr, 255 + .start = nvkm_falcon_v1_start, 256 + .enable = nvkm_falcon_v1_enable, 257 + .disable = nvkm_falcon_v1_disable, 258 + .cmdq = { 0x4a0, 0x4b0, 4 }, 259 + .msgq = { 0x4c8, 0x4cc, 0 }, 260 + }; 261 + 244 262 static const struct nvkm_pmu_func 245 263 gt215_pmu = { 264 + .flcn = &gt215_pmu_flcn, 246 265 .code.data = gt215_pmu_code, 247 266 .code.size = sizeof(gt215_pmu_code), 248 267 .data.data = gt215_pmu_data, ··· 275 256 .recv = gt215_pmu_recv, 276 257 }; 277 258 259 + static const struct nvkm_pmu_fwif 260 + gt215_pmu_fwif[] = { 261 + { -1, gf100_pmu_nofw, &gt215_pmu }, 262 + {} 263 + }; 264 + 278 265 int 279 266 gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) 280 267 { 281 - return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu); 268 + return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu); 282 269 }
+28 -5
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
··· 4 4 #define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev) 5 5 #include <subdev/pmu.h> 6 6 #include <subdev/pmu/fuc/os.h> 7 - 8 - int nvkm_pmu_ctor(const struct nvkm_pmu_func *, struct nvkm_device *, 9 - int index, struct nvkm_pmu *); 10 - int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *, 11 - int index, struct nvkm_pmu **); 7 + enum nvkm_acr_lsf_id; 8 + struct nvkm_acr_lsfw; 12 9 13 10 struct nvkm_pmu_func { 11 + const struct nvkm_falcon_func *flcn; 12 + 14 13 struct { 15 14 u32 *data; 16 15 u32 size; ··· 28 29 int (*send)(struct nvkm_pmu *, u32 reply[2], u32 process, 29 30 u32 message, u32 data0, u32 data1); 30 31 void (*recv)(struct nvkm_pmu *); 32 + int (*initmsg)(struct nvkm_pmu *); 31 33 void (*pgob)(struct nvkm_pmu *, bool); 32 34 }; 33 35 36 + extern const struct nvkm_falcon_func gt215_pmu_flcn; 34 37 int gt215_pmu_init(struct nvkm_pmu *); 35 38 void gt215_pmu_fini(struct nvkm_pmu *); 36 39 void gt215_pmu_intr(struct nvkm_pmu *); ··· 43 42 void gf100_pmu_reset(struct nvkm_pmu *); 44 43 45 44 void gk110_pmu_pgob(struct nvkm_pmu *, bool); 45 + 46 + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); 47 + void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); 48 + int gm20b_pmu_acr_boot(struct nvkm_falcon *); 49 + int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id); 50 + void gm20b_pmu_recv(struct nvkm_pmu *); 51 + int gm20b_pmu_initmsg(struct nvkm_pmu *); 52 + 53 + struct nvkm_pmu_fwif { 54 + int version; 55 + int (*load)(struct nvkm_pmu *, int ver, const struct nvkm_pmu_fwif *); 56 + const struct nvkm_pmu_func *func; 57 + const struct nvkm_acr_lsf_func *acr; 58 + }; 59 + 60 + int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); 61 + int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); 62 + 63 + int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, 64 + int index, struct nvkm_pmu *); 65 + int nvkm_pmu_new_(const 
struct nvkm_pmu_fwif *, struct nvkm_device *, 66 + int index, struct nvkm_pmu **); 46 67 #endif
-17
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
··· 1 - # SPDX-License-Identifier: MIT 2 - nvkm-y += nvkm/subdev/secboot/base.o 3 - nvkm-y += nvkm/subdev/secboot/hs_ucode.o 4 - nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o 5 - nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o 6 - nvkm-y += nvkm/subdev/secboot/acr.o 7 - nvkm-y += nvkm/subdev/secboot/acr_r352.o 8 - nvkm-y += nvkm/subdev/secboot/acr_r361.o 9 - nvkm-y += nvkm/subdev/secboot/acr_r364.o 10 - nvkm-y += nvkm/subdev/secboot/acr_r367.o 11 - nvkm-y += nvkm/subdev/secboot/acr_r370.o 12 - nvkm-y += nvkm/subdev/secboot/acr_r375.o 13 - nvkm-y += nvkm/subdev/secboot/gm200.o 14 - nvkm-y += nvkm/subdev/secboot/gm20b.o 15 - nvkm-y += nvkm/subdev/secboot/gp102.o 16 - nvkm-y += nvkm/subdev/secboot/gp108.o 17 - nvkm-y += nvkm/subdev/secboot/gp10b.o
-54
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #include "acr.h" 24 - 25 - #include <core/firmware.h> 26 - 27 - /** 28 - * Convenience function to duplicate a firmware file in memory and check that 29 - * it has the required minimum size. 
30 - */ 31 - void * 32 - nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name, 33 - size_t min_size) 34 - { 35 - const struct firmware *fw; 36 - void *blob; 37 - int ret; 38 - 39 - ret = nvkm_firmware_get(subdev, name, &fw); 40 - if (ret) 41 - return ERR_PTR(ret); 42 - if (fw->size < min_size) { 43 - nvkm_error(subdev, "%s is smaller than expected size %zu\n", 44 - name, min_size); 45 - nvkm_firmware_put(fw); 46 - return ERR_PTR(-EINVAL); 47 - } 48 - blob = kmemdup(fw->data, fw->size, GFP_KERNEL); 49 - nvkm_firmware_put(fw); 50 - if (!blob) 51 - return ERR_PTR(-ENOMEM); 52 - 53 - return blob; 54 - }
-70
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - #ifndef __NVKM_SECBOOT_ACR_H__ 23 - #define __NVKM_SECBOOT_ACR_H__ 24 - 25 - #include "priv.h" 26 - 27 - struct nvkm_acr; 28 - 29 - /** 30 - * struct nvkm_acr_func - properties and functions specific to an ACR 31 - * 32 - * @load: make the ACR ready to run on the given secboot device 33 - * @reset: reset the specified falcon 34 - * @start: start the specified falcon (assumed to have been reset) 35 - */ 36 - struct nvkm_acr_func { 37 - void (*dtor)(struct nvkm_acr *); 38 - int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *); 39 - int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool); 40 - int (*load)(struct nvkm_acr *, struct nvkm_falcon *, 41 - struct nvkm_gpuobj *, u64); 42 - int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long); 43 - }; 44 - 45 - /** 46 - * struct nvkm_acr - instance of an ACR 47 - * 48 - * @boot_falcon: ID of the falcon that will perform secure boot 49 - * @managed_falcons: bitfield of falcons managed by this ACR 50 - * @optional_falcons: bitfield of falcons we can live without 51 - */ 52 - struct nvkm_acr { 53 - const struct nvkm_acr_func *func; 54 - const struct nvkm_subdev *subdev; 55 - 56 - enum nvkm_secboot_falcon boot_falcon; 57 - unsigned long managed_falcons; 58 - unsigned long optional_falcons; 59 - }; 60 - 61 - void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t); 62 - 63 - struct nvkm_acr *acr_r352_new(unsigned long); 64 - struct nvkm_acr *acr_r361_new(unsigned long); 65 - struct nvkm_acr *acr_r364_new(unsigned long); 66 - struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long); 67 - struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long); 68 - struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long); 69 - 70 - #endif
-1241
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #include "acr_r352.h" 24 - #include "hs_ucode.h" 25 - 26 - #include <core/gpuobj.h> 27 - #include <core/firmware.h> 28 - #include <engine/falcon.h> 29 - #include <subdev/pmu.h> 30 - #include <core/msgqueue.h> 31 - #include <engine/sec2.h> 32 - 33 - /** 34 - * struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor 35 - * @signature: 16B signature for secure code. 0s if no secure code 36 - * @ctx_dma: DMA context to be used by BL while loading code/data 37 - * @code_dma_base: 256B-aligned Physical FB Address where code is located 38 - * (falcon's $xcbase register) 39 - * @non_sec_code_off: offset from code_dma_base where the non-secure code is 40 - * located. 
The offset must be multiple of 256 to help perf 41 - * @non_sec_code_size: the size of the nonSecure code part. 42 - * @sec_code_off: offset from code_dma_base where the secure code is 43 - * located. The offset must be multiple of 256 to help perf 44 - * @sec_code_size: offset from code_dma_base where the secure code is 45 - * located. The offset must be multiple of 256 to help perf 46 - * @code_entry_point: code entry point which will be invoked by BL after 47 - * code is loaded. 48 - * @data_dma_base: 256B aligned Physical FB Address where data is located. 49 - * (falcon's $xdbase register) 50 - * @data_size: size of data block. Should be multiple of 256B 51 - * 52 - * Structure used by the bootloader to load the rest of the code. This has 53 - * to be filled by host and copied into DMEM at offset provided in the 54 - * hsflcn_bl_desc.bl_desc_dmem_load_off. 55 - */ 56 - struct acr_r352_flcn_bl_desc { 57 - u32 reserved[4]; 58 - u32 signature[4]; 59 - u32 ctx_dma; 60 - u32 code_dma_base; 61 - u32 non_sec_code_off; 62 - u32 non_sec_code_size; 63 - u32 sec_code_off; 64 - u32 sec_code_size; 65 - u32 code_entry_point; 66 - u32 data_dma_base; 67 - u32 data_size; 68 - u32 code_dma_base1; 69 - u32 data_dma_base1; 70 - }; 71 - 72 - /** 73 - * acr_r352_generate_flcn_bl_desc - generate generic BL descriptor for LS image 74 - */ 75 - static void 76 - acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr, 77 - const struct ls_ucode_img *img, u64 wpr_addr, 78 - void *_desc) 79 - { 80 - struct acr_r352_flcn_bl_desc *desc = _desc; 81 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 82 - u64 base, addr_code, addr_data; 83 - 84 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 85 - addr_code = (base + pdesc->app_resident_code_offset) >> 8; 86 - addr_data = (base + pdesc->app_resident_data_offset) >> 8; 87 - 88 - desc->ctx_dma = FALCON_DMAIDX_UCODE; 89 - desc->code_dma_base = lower_32_bits(addr_code); 90 - desc->code_dma_base1 = upper_32_bits(addr_code); 
91 - desc->non_sec_code_off = pdesc->app_resident_code_offset; 92 - desc->non_sec_code_size = pdesc->app_resident_code_size; 93 - desc->code_entry_point = pdesc->app_imem_entry; 94 - desc->data_dma_base = lower_32_bits(addr_data); 95 - desc->data_dma_base1 = upper_32_bits(addr_data); 96 - desc->data_size = pdesc->app_resident_data_size; 97 - } 98 - 99 - 100 - /** 101 - * struct hsflcn_acr_desc - data section of the HS firmware 102 - * 103 - * This header is to be copied at the beginning of DMEM by the HS bootloader. 104 - * 105 - * @signature: signature of ACR ucode 106 - * @wpr_region_id: region ID holding the WPR header and its details 107 - * @wpr_offset: offset from the WPR region holding the wpr header 108 - * @regions: region descriptors 109 - * @nonwpr_ucode_blob_size: size of LS blob 110 - * @nonwpr_ucode_blob_start: FB location of LS blob is 111 - */ 112 - struct hsflcn_acr_desc { 113 - union { 114 - u8 reserved_dmem[0x200]; 115 - u32 signatures[4]; 116 - } ucode_reserved_space; 117 - u32 wpr_region_id; 118 - u32 wpr_offset; 119 - u32 mmu_mem_range; 120 - #define FLCN_ACR_MAX_REGIONS 2 121 - struct { 122 - u32 no_regions; 123 - struct { 124 - u32 start_addr; 125 - u32 end_addr; 126 - u32 region_id; 127 - u32 read_mask; 128 - u32 write_mask; 129 - u32 client_mask; 130 - } region_props[FLCN_ACR_MAX_REGIONS]; 131 - } regions; 132 - u32 ucode_blob_size; 133 - u64 ucode_blob_base __aligned(8); 134 - struct { 135 - u32 vpr_enabled; 136 - u32 vpr_start; 137 - u32 vpr_end; 138 - u32 hdcp_policies; 139 - } vpr_desc; 140 - }; 141 - 142 - 143 - /* 144 - * Low-secure blob creation 145 - */ 146 - 147 - /** 148 - * struct acr_r352_lsf_lsb_header - LS firmware header 149 - * @signature: signature to verify the firmware against 150 - * @ucode_off: offset of the ucode blob in the WPR region. 
The ucode 151 - * blob contains the bootloader, code and data of the 152 - * LS falcon 153 - * @ucode_size: size of the ucode blob, including bootloader 154 - * @data_size: size of the ucode blob data 155 - * @bl_code_size: size of the bootloader code 156 - * @bl_imem_off: offset in imem of the bootloader 157 - * @bl_data_off: offset of the bootloader data in WPR region 158 - * @bl_data_size: size of the bootloader data 159 - * @app_code_off: offset of the app code relative to ucode_off 160 - * @app_code_size: size of the app code 161 - * @app_data_off: offset of the app data relative to ucode_off 162 - * @app_data_size: size of the app data 163 - * @flags: flags for the secure bootloader 164 - * 165 - * This structure is written into the WPR region for each managed falcon. Each 166 - * instance is referenced by the lsb_offset member of the corresponding 167 - * lsf_wpr_header. 168 - */ 169 - struct acr_r352_lsf_lsb_header { 170 - /** 171 - * LS falcon signatures 172 - * @prd_keys: signature to use in production mode 173 - * @dgb_keys: signature to use in debug mode 174 - * @b_prd_present: whether the production key is present 175 - * @b_dgb_present: whether the debug key is present 176 - * @falcon_id: ID of the falcon the ucode applies to 177 - */ 178 - struct { 179 - u8 prd_keys[2][16]; 180 - u8 dbg_keys[2][16]; 181 - u32 b_prd_present; 182 - u32 b_dbg_present; 183 - u32 falcon_id; 184 - } signature; 185 - u32 ucode_off; 186 - u32 ucode_size; 187 - u32 data_size; 188 - u32 bl_code_size; 189 - u32 bl_imem_off; 190 - u32 bl_data_off; 191 - u32 bl_data_size; 192 - u32 app_code_off; 193 - u32 app_code_size; 194 - u32 app_data_off; 195 - u32 app_data_size; 196 - u32 flags; 197 - }; 198 - 199 - /** 200 - * struct acr_r352_lsf_wpr_header - LS blob WPR Header 201 - * @falcon_id: LS falcon ID 202 - * @lsb_offset: offset of the lsb_lsf_header in the WPR region 203 - * @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon 204 - * @lazy_bootstrap: skip 
bootstrapping by ACR 205 - * @status: bootstrapping status 206 - * 207 - * An array of these is written at the beginning of the WPR region, one for 208 - * each managed falcon. The array is terminated by an instance which falcon_id 209 - * is LSF_FALCON_ID_INVALID. 210 - */ 211 - struct acr_r352_lsf_wpr_header { 212 - u32 falcon_id; 213 - u32 lsb_offset; 214 - u32 bootstrap_owner; 215 - u32 lazy_bootstrap; 216 - u32 status; 217 - #define LSF_IMAGE_STATUS_NONE 0 218 - #define LSF_IMAGE_STATUS_COPY 1 219 - #define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 220 - #define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 221 - #define LSF_IMAGE_STATUS_VALIDATION_DONE 4 222 - #define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 223 - #define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 224 - }; 225 - 226 - /** 227 - * struct ls_ucode_img_r352 - ucode image augmented with r352 headers 228 - */ 229 - struct ls_ucode_img_r352 { 230 - struct ls_ucode_img base; 231 - 232 - const struct acr_r352_lsf_func *func; 233 - 234 - struct acr_r352_lsf_wpr_header wpr_header; 235 - struct acr_r352_lsf_lsb_header lsb_header; 236 - }; 237 - #define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base) 238 - 239 - /** 240 - * ls_ucode_img_load() - create a lsf_ucode_img and load it 241 - */ 242 - struct ls_ucode_img * 243 - acr_r352_ls_ucode_img_load(const struct acr_r352 *acr, 244 - const struct nvkm_secboot *sb, 245 - enum nvkm_secboot_falcon falcon_id) 246 - { 247 - const struct nvkm_subdev *subdev = acr->base.subdev; 248 - const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; 249 - struct ls_ucode_img_r352 *img; 250 - int ret; 251 - 252 - img = kzalloc(sizeof(*img), GFP_KERNEL); 253 - if (!img) 254 - return ERR_PTR(-ENOMEM); 255 - 256 - img->base.falcon_id = falcon_id; 257 - 258 - ret = func->load(sb, func->version_max, &img->base); 259 - if (ret < 0) { 260 - kfree(img->base.ucode_data); 261 - kfree(img->base.sig); 262 - kfree(img); 263 - return ERR_PTR(ret); 264 - } 265 - 266 - 
img->func = func->version[ret]; 267 - 268 - /* Check that the signature size matches our expectations... */ 269 - if (img->base.sig_size != sizeof(img->lsb_header.signature)) { 270 - nvkm_error(subdev, "invalid signature size for %s falcon!\n", 271 - nvkm_secboot_falcon_name[falcon_id]); 272 - return ERR_PTR(-EINVAL); 273 - } 274 - 275 - /* Copy signature to the right place */ 276 - memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); 277 - 278 - /* not needed? the signature should already have the right value */ 279 - img->lsb_header.signature.falcon_id = falcon_id; 280 - 281 - return &img->base; 282 - } 283 - 284 - #define LSF_LSB_HEADER_ALIGN 256 285 - #define LSF_BL_DATA_ALIGN 256 286 - #define LSF_BL_DATA_SIZE_ALIGN 256 287 - #define LSF_BL_CODE_SIZE_ALIGN 256 288 - #define LSF_UCODE_DATA_ALIGN 4096 289 - 290 - /** 291 - * acr_r352_ls_img_fill_headers - fill the WPR and LSB headers of an image 292 - * @acr: ACR to use 293 - * @img: image to generate for 294 - * @offset: offset in the WPR region where this image starts 295 - * 296 - * Allocate space in the WPR area from offset and write the WPR and LSB headers 297 - * accordingly. 298 - * 299 - * Return: offset at the end of this image. 
300 - */ 301 - static u32 302 - acr_r352_ls_img_fill_headers(struct acr_r352 *acr, 303 - struct ls_ucode_img_r352 *img, u32 offset) 304 - { 305 - struct ls_ucode_img *_img = &img->base; 306 - struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header; 307 - struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header; 308 - struct ls_ucode_img_desc *desc = &_img->ucode_desc; 309 - const struct acr_r352_lsf_func *func = img->func; 310 - 311 - /* Fill WPR header */ 312 - whdr->falcon_id = _img->falcon_id; 313 - whdr->bootstrap_owner = acr->base.boot_falcon; 314 - whdr->status = LSF_IMAGE_STATUS_COPY; 315 - 316 - /* Skip bootstrapping falcons started by someone else than ACR */ 317 - if (acr->lazy_bootstrap & BIT(_img->falcon_id)) 318 - whdr->lazy_bootstrap = 1; 319 - 320 - /* Align, save off, and include an LSB header size */ 321 - offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); 322 - whdr->lsb_offset = offset; 323 - offset += sizeof(*lhdr); 324 - 325 - /* 326 - * Align, save off, and include the original (static) ucode 327 - * image size 328 - */ 329 - offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); 330 - _img->ucode_off = lhdr->ucode_off = offset; 331 - offset += _img->ucode_size; 332 - 333 - /* 334 - * For falcons that use a boot loader (BL), we append a loader 335 - * desc structure on the end of the ucode image and consider 336 - * this the boot loader data. The host will then copy the loader 337 - * desc args to this space within the WPR region (before locking 338 - * down) and the HS bin will then copy them to DMEM 0 for the 339 - * loader. 
340 - */ 341 - lhdr->bl_code_size = ALIGN(desc->bootloader_size, 342 - LSF_BL_CODE_SIZE_ALIGN); 343 - lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, 344 - LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; 345 - lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + 346 - lhdr->bl_code_size - lhdr->ucode_size; 347 - /* 348 - * Though the BL is located at 0th offset of the image, the VA 349 - * is different to make sure that it doesn't collide the actual 350 - * OS VA range 351 - */ 352 - lhdr->bl_imem_off = desc->bootloader_imem_offset; 353 - lhdr->app_code_off = desc->app_start_offset + 354 - desc->app_resident_code_offset; 355 - lhdr->app_code_size = desc->app_resident_code_size; 356 - lhdr->app_data_off = desc->app_start_offset + 357 - desc->app_resident_data_offset; 358 - lhdr->app_data_size = desc->app_resident_data_size; 359 - 360 - lhdr->flags = func->lhdr_flags; 361 - if (_img->falcon_id == acr->base.boot_falcon) 362 - lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; 363 - 364 - /* Align and save off BL descriptor size */ 365 - lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); 366 - 367 - /* 368 - * Align, save off, and include the additional BL data 369 - */ 370 - offset = ALIGN(offset, LSF_BL_DATA_ALIGN); 371 - lhdr->bl_data_off = offset; 372 - offset += lhdr->bl_data_size; 373 - 374 - return offset; 375 - } 376 - 377 - /** 378 - * acr_r352_ls_fill_headers - fill WPR and LSB headers of all managed images 379 - */ 380 - int 381 - acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) 382 - { 383 - struct ls_ucode_img_r352 *img; 384 - struct list_head *l; 385 - u32 count = 0; 386 - u32 offset; 387 - 388 - /* Count the number of images to manage */ 389 - list_for_each(l, imgs) 390 - count++; 391 - 392 - /* 393 - * Start with an array of WPR headers at the base of the WPR. 
394 - * The expectation here is that the secure falcon will do a single DMA 395 - * read of this array and cache it internally so it's ok to pack these. 396 - * Also, we add 1 to the falcon count to indicate the end of the array. 397 - */ 398 - offset = sizeof(img->wpr_header) * (count + 1); 399 - 400 - /* 401 - * Walk the managed falcons, accounting for the LSB structs 402 - * as well as the ucode images. 403 - */ 404 - list_for_each_entry(img, imgs, base.node) { 405 - offset = acr_r352_ls_img_fill_headers(acr, img, offset); 406 - } 407 - 408 - return offset; 409 - } 410 - 411 - /** 412 - * acr_r352_ls_write_wpr - write the WPR blob contents 413 - */ 414 - int 415 - acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, 416 - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) 417 - { 418 - struct ls_ucode_img *_img; 419 - u32 pos = 0; 420 - u32 max_desc_size = 0; 421 - u8 *gdesc; 422 - 423 - /* Figure out how large we need gdesc to be. */ 424 - list_for_each_entry(_img, imgs, node) { 425 - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); 426 - const struct acr_r352_lsf_func *ls_func = img->func; 427 - 428 - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); 429 - } 430 - 431 - gdesc = kmalloc(max_desc_size, GFP_KERNEL); 432 - if (!gdesc) 433 - return -ENOMEM; 434 - 435 - nvkm_kmap(wpr_blob); 436 - 437 - list_for_each_entry(_img, imgs, node) { 438 - struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img); 439 - const struct acr_r352_lsf_func *ls_func = img->func; 440 - 441 - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, 442 - sizeof(img->wpr_header)); 443 - 444 - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, 445 - &img->lsb_header, sizeof(img->lsb_header)); 446 - 447 - /* Generate and write BL descriptor */ 448 - memset(gdesc, 0, ls_func->bl_desc_size); 449 - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); 450 - 451 - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, 452 - gdesc, 
ls_func->bl_desc_size); 453 - 454 - /* Copy ucode */ 455 - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, 456 - _img->ucode_data, _img->ucode_size); 457 - 458 - pos += sizeof(img->wpr_header); 459 - } 460 - 461 - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); 462 - 463 - nvkm_done(wpr_blob); 464 - 465 - kfree(gdesc); 466 - 467 - return 0; 468 - } 469 - 470 - /* Both size and address of WPR need to be 256K-aligned */ 471 - #define WPR_ALIGNMENT 0x40000 472 - /** 473 - * acr_r352_prepare_ls_blob() - prepare the LS blob 474 - * 475 - * For each securely managed falcon, load the FW, signatures and bootloaders and 476 - * prepare a ucode blob. Then, compute the offsets in the WPR region for each 477 - * blob, and finally write the headers and ucode blobs into a GPU object that 478 - * will be copied into the WPR region by the HS firmware. 479 - */ 480 - static int 481 - acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb) 482 - { 483 - const struct nvkm_subdev *subdev = acr->base.subdev; 484 - struct list_head imgs; 485 - struct ls_ucode_img *img, *t; 486 - unsigned long managed_falcons = acr->base.managed_falcons; 487 - u64 wpr_addr = sb->wpr_addr; 488 - u32 wpr_size = sb->wpr_size; 489 - int managed_count = 0; 490 - u32 image_wpr_size, ls_blob_size; 491 - int falcon_id; 492 - int ret; 493 - 494 - INIT_LIST_HEAD(&imgs); 495 - 496 - /* Load all LS blobs */ 497 - for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { 498 - struct ls_ucode_img *img; 499 - 500 - img = acr->func->ls_ucode_img_load(acr, sb, falcon_id); 501 - if (IS_ERR(img)) { 502 - if (acr->base.optional_falcons & BIT(falcon_id)) { 503 - managed_falcons &= ~BIT(falcon_id); 504 - nvkm_info(subdev, "skipping %s falcon...\n", 505 - nvkm_secboot_falcon_name[falcon_id]); 506 - continue; 507 - } 508 - ret = PTR_ERR(img); 509 - goto cleanup; 510 - } 511 - 512 - list_add_tail(&img->node, &imgs); 513 - managed_count++; 514 - } 515 - 516 - /* Commit the actual 
list of falcons we will manage from now on */ 517 - acr->base.managed_falcons = managed_falcons; 518 - 519 - /* 520 - * If the boot falcon has a firmare, let it manage the bootstrap of other 521 - * falcons. 522 - */ 523 - if (acr->func->ls_func[acr->base.boot_falcon] && 524 - (managed_falcons & BIT(acr->base.boot_falcon))) { 525 - for_each_set_bit(falcon_id, &managed_falcons, 526 - NVKM_SECBOOT_FALCON_END) { 527 - if (falcon_id == acr->base.boot_falcon) 528 - continue; 529 - 530 - acr->lazy_bootstrap |= BIT(falcon_id); 531 - } 532 - } 533 - 534 - /* 535 - * Fill the WPR and LSF headers with the right offsets and compute 536 - * required WPR size 537 - */ 538 - image_wpr_size = acr->func->ls_fill_headers(acr, &imgs); 539 - image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT); 540 - 541 - ls_blob_size = image_wpr_size; 542 - 543 - /* 544 - * If we need a shadow area, allocate twice the size and use the 545 - * upper half as WPR 546 - */ 547 - if (wpr_size == 0 && acr->func->shadow_blob) 548 - ls_blob_size *= 2; 549 - 550 - /* Allocate GPU object that will contain the WPR region */ 551 - ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT, 552 - false, NULL, &acr->ls_blob); 553 - if (ret) 554 - goto cleanup; 555 - 556 - nvkm_debug(subdev, "%d managed LS falcons, WPR size is %d bytes\n", 557 - managed_count, image_wpr_size); 558 - 559 - /* If WPR address and size are not fixed, set them to fit the LS blob */ 560 - if (wpr_size == 0) { 561 - wpr_addr = acr->ls_blob->addr; 562 - if (acr->func->shadow_blob) 563 - wpr_addr += acr->ls_blob->size / 2; 564 - 565 - wpr_size = image_wpr_size; 566 - /* 567 - * But if the WPR region is set by the bootloader, it is illegal for 568 - * the HS blob to be larger than this region. 
569 - */ 570 - } else if (image_wpr_size > wpr_size) { 571 - nvkm_error(subdev, "WPR region too small for FW blob!\n"); 572 - nvkm_error(subdev, "required: %dB\n", image_wpr_size); 573 - nvkm_error(subdev, "available: %dB\n", wpr_size); 574 - ret = -ENOSPC; 575 - goto cleanup; 576 - } 577 - 578 - /* Write LS blob */ 579 - ret = acr->func->ls_write_wpr(acr, &imgs, acr->ls_blob, wpr_addr); 580 - if (ret) 581 - nvkm_gpuobj_del(&acr->ls_blob); 582 - 583 - cleanup: 584 - list_for_each_entry_safe(img, t, &imgs, node) { 585 - kfree(img->ucode_data); 586 - kfree(img->sig); 587 - kfree(img); 588 - } 589 - 590 - return ret; 591 - } 592 - 593 - 594 - 595 - 596 - void 597 - acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, 598 - void *_desc) 599 - { 600 - struct hsflcn_acr_desc *desc = _desc; 601 - struct nvkm_gpuobj *ls_blob = acr->ls_blob; 602 - 603 - /* WPR region information if WPR is not fixed */ 604 - if (sb->wpr_size == 0) { 605 - u64 wpr_start = ls_blob->addr; 606 - u64 wpr_end = wpr_start + ls_blob->size; 607 - 608 - desc->wpr_region_id = 1; 609 - desc->regions.no_regions = 2; 610 - desc->regions.region_props[0].start_addr = wpr_start >> 8; 611 - desc->regions.region_props[0].end_addr = wpr_end >> 8; 612 - desc->regions.region_props[0].region_id = 1; 613 - desc->regions.region_props[0].read_mask = 0xf; 614 - desc->regions.region_props[0].write_mask = 0xc; 615 - desc->regions.region_props[0].client_mask = 0x2; 616 - } else { 617 - desc->ucode_blob_base = ls_blob->addr; 618 - desc->ucode_blob_size = ls_blob->size; 619 - } 620 - } 621 - 622 - static void 623 - acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, 624 - u64 offset) 625 - { 626 - struct acr_r352_flcn_bl_desc *bl_desc = _bl_desc; 627 - u64 addr_code, addr_data; 628 - 629 - addr_code = offset >> 8; 630 - addr_data = (offset + hdr->data_dma_base) >> 8; 631 - 632 - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; 633 - bl_desc->code_dma_base = lower_32_bits(addr_code); 634 - 
bl_desc->non_sec_code_off = hdr->non_sec_code_off; 635 - bl_desc->non_sec_code_size = hdr->non_sec_code_size; 636 - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); 637 - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); 638 - bl_desc->code_entry_point = 0; 639 - bl_desc->data_dma_base = lower_32_bits(addr_data); 640 - bl_desc->data_size = hdr->data_size; 641 - } 642 - 643 - /** 644 - * acr_r352_prepare_hs_blob - load and prepare a HS blob and BL descriptor 645 - * 646 - * @sb secure boot instance to prepare for 647 - * @fw name of the HS firmware to load 648 - * @blob pointer to gpuobj that will be allocated to receive the HS FW payload 649 - * @bl_desc pointer to the BL descriptor to write for this firmware 650 - * @patch whether we should patch the HS descriptor (only for HS loaders) 651 - */ 652 - static int 653 - acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb, 654 - const char *fw, struct nvkm_gpuobj **blob, 655 - struct hsf_load_header *load_header, bool patch) 656 - { 657 - struct nvkm_subdev *subdev = &sb->subdev; 658 - void *acr_image; 659 - struct fw_bin_header *hsbin_hdr; 660 - struct hsf_fw_header *fw_hdr; 661 - struct hsf_load_header *load_hdr; 662 - void *acr_data; 663 - int ret; 664 - 665 - acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw); 666 - if (IS_ERR(acr_image)) 667 - return PTR_ERR(acr_image); 668 - 669 - hsbin_hdr = acr_image; 670 - fw_hdr = acr_image + hsbin_hdr->header_offset; 671 - load_hdr = acr_image + fw_hdr->hdr_offset; 672 - acr_data = acr_image + hsbin_hdr->data_offset; 673 - 674 - /* Patch descriptor with WPR information? 
*/ 675 - if (patch) { 676 - struct hsflcn_acr_desc *desc; 677 - 678 - desc = acr_data + load_hdr->data_dma_base; 679 - acr->func->fixup_hs_desc(acr, sb, desc); 680 - } 681 - 682 - if (load_hdr->num_apps > ACR_R352_MAX_APPS) { 683 - nvkm_error(subdev, "more apps (%d) than supported (%d)!", 684 - load_hdr->num_apps, ACR_R352_MAX_APPS); 685 - ret = -EINVAL; 686 - goto cleanup; 687 - } 688 - memcpy(load_header, load_hdr, sizeof(*load_header) + 689 - (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps)); 690 - 691 - /* Create ACR blob and copy HS data to it */ 692 - ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), 693 - 0x1000, false, NULL, blob); 694 - if (ret) 695 - goto cleanup; 696 - 697 - nvkm_kmap(*blob); 698 - nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size); 699 - nvkm_done(*blob); 700 - 701 - cleanup: 702 - kfree(acr_image); 703 - 704 - return ret; 705 - } 706 - 707 - /** 708 - * acr_r352_load_blobs - load blobs common to all ACR V1 versions. 709 - * 710 - * This includes the LS blob, HS ucode loading blob, and HS bootloader. 711 - * 712 - * The HS ucode unload blob is only used on dGPU if the WPR region is variable. 713 - */ 714 - int 715 - acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb) 716 - { 717 - struct nvkm_subdev *subdev = &sb->subdev; 718 - int ret; 719 - 720 - /* Firmware already loaded? 
*/ 721 - if (acr->firmware_ok) 722 - return 0; 723 - 724 - /* Load and prepare the managed falcon's firmwares */ 725 - ret = acr_r352_prepare_ls_blob(acr, sb); 726 - if (ret) 727 - return ret; 728 - 729 - /* Load the HS firmware that will load the LS firmwares */ 730 - if (!acr->load_blob) { 731 - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_load", 732 - &acr->load_blob, 733 - &acr->load_bl_header, true); 734 - if (ret) 735 - return ret; 736 - } 737 - 738 - /* If the ACR region is dynamically programmed, we need an unload FW */ 739 - if (sb->wpr_size == 0) { 740 - ret = acr_r352_prepare_hs_blob(acr, sb, "acr/ucode_unload", 741 - &acr->unload_blob, 742 - &acr->unload_bl_header, false); 743 - if (ret) 744 - return ret; 745 - } 746 - 747 - /* Load the HS firmware bootloader */ 748 - if (!acr->hsbl_blob) { 749 - acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0); 750 - if (IS_ERR(acr->hsbl_blob)) { 751 - ret = PTR_ERR(acr->hsbl_blob); 752 - acr->hsbl_blob = NULL; 753 - return ret; 754 - } 755 - 756 - if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) { 757 - acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev, 758 - "acr/unload_bl", 0); 759 - if (IS_ERR(acr->hsbl_unload_blob)) { 760 - ret = PTR_ERR(acr->hsbl_unload_blob); 761 - acr->hsbl_unload_blob = NULL; 762 - return ret; 763 - } 764 - } else { 765 - acr->hsbl_unload_blob = acr->hsbl_blob; 766 - } 767 - } 768 - 769 - acr->firmware_ok = true; 770 - nvkm_debug(&sb->subdev, "LS blob successfully created\n"); 771 - 772 - return 0; 773 - } 774 - 775 - /** 776 - * acr_r352_load() - prepare HS falcon to run the specified blob, mapped. 777 - * 778 - * Returns the start address to use, or a negative error value. 
779 - */ 780 - static int 781 - acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon, 782 - struct nvkm_gpuobj *blob, u64 offset) 783 - { 784 - struct acr_r352 *acr = acr_r352(_acr); 785 - const u32 bl_desc_size = acr->func->hs_bl_desc_size; 786 - const struct hsf_load_header *load_hdr; 787 - struct fw_bin_header *bl_hdr; 788 - struct fw_bl_desc *hsbl_desc; 789 - void *bl, *blob_data, *hsbl_code, *hsbl_data; 790 - u32 code_size; 791 - u8 *bl_desc; 792 - 793 - bl_desc = kzalloc(bl_desc_size, GFP_KERNEL); 794 - if (!bl_desc) 795 - return -ENOMEM; 796 - 797 - /* Find the bootloader descriptor for our blob and copy it */ 798 - if (blob == acr->load_blob) { 799 - load_hdr = &acr->load_bl_header; 800 - bl = acr->hsbl_blob; 801 - } else if (blob == acr->unload_blob) { 802 - load_hdr = &acr->unload_bl_header; 803 - bl = acr->hsbl_unload_blob; 804 - } else { 805 - nvkm_error(_acr->subdev, "invalid secure boot blob!\n"); 806 - kfree(bl_desc); 807 - return -EINVAL; 808 - } 809 - 810 - bl_hdr = bl; 811 - hsbl_desc = bl + bl_hdr->header_offset; 812 - blob_data = bl + bl_hdr->data_offset; 813 - hsbl_code = blob_data + hsbl_desc->code_off; 814 - hsbl_data = blob_data + hsbl_desc->data_off; 815 - code_size = ALIGN(hsbl_desc->code_size, 256); 816 - 817 - /* 818 - * Copy HS bootloader data 819 - */ 820 - nvkm_falcon_load_dmem(falcon, hsbl_data, 0x0, hsbl_desc->data_size, 0); 821 - 822 - /* Copy HS bootloader code to end of IMEM */ 823 - nvkm_falcon_load_imem(falcon, hsbl_code, falcon->code.limit - code_size, 824 - code_size, hsbl_desc->start_tag, 0, false); 825 - 826 - /* Generate the BL header */ 827 - acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset); 828 - 829 - /* 830 - * Copy HS BL header where the HS descriptor expects it to be 831 - */ 832 - nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off, 833 - bl_desc_size, 0); 834 - 835 - kfree(bl_desc); 836 - return hsbl_desc->start_tag << 8; 837 - } 838 - 839 - static int 840 - acr_r352_shutdown(struct 
acr_r352 *acr, struct nvkm_secboot *sb) 841 - { 842 - struct nvkm_subdev *subdev = &sb->subdev; 843 - int i; 844 - 845 - /* Run the unload blob to unprotect the WPR region */ 846 - if (acr->unload_blob && sb->wpr_set) { 847 - int ret; 848 - 849 - nvkm_debug(subdev, "running HS unload blob\n"); 850 - ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon); 851 - if (ret < 0) 852 - return ret; 853 - /* 854 - * Unload blob will return this error code - it is not an error 855 - * and the expected behavior on RM as well 856 - */ 857 - if (ret && ret != 0x1d) { 858 - nvkm_error(subdev, "HS unload failed, ret 0x%08x\n", ret); 859 - return -EINVAL; 860 - } 861 - nvkm_debug(subdev, "HS unload blob completed\n"); 862 - } 863 - 864 - for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++) 865 - acr->falcon_state[i] = NON_SECURE; 866 - 867 - sb->wpr_set = false; 868 - 869 - return 0; 870 - } 871 - 872 - /** 873 - * Check if the WPR region has been indeed set by the ACR firmware, and 874 - * matches where it should be. 
875 - */ 876 - static bool 877 - acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb) 878 - { 879 - const struct nvkm_subdev *subdev = &sb->subdev; 880 - const struct nvkm_device *device = subdev->device; 881 - u64 wpr_lo, wpr_hi; 882 - u64 wpr_range_lo, wpr_range_hi; 883 - 884 - nvkm_wr32(device, 0x100cd4, 0x2); 885 - wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff); 886 - wpr_lo <<= 8; 887 - nvkm_wr32(device, 0x100cd4, 0x3); 888 - wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff); 889 - wpr_hi <<= 8; 890 - 891 - if (sb->wpr_size != 0) { 892 - wpr_range_lo = sb->wpr_addr; 893 - wpr_range_hi = wpr_range_lo + sb->wpr_size; 894 - } else { 895 - wpr_range_lo = acr->ls_blob->addr; 896 - wpr_range_hi = wpr_range_lo + acr->ls_blob->size; 897 - } 898 - 899 - return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi && 900 - wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi); 901 - } 902 - 903 - static int 904 - acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) 905 - { 906 - const struct nvkm_subdev *subdev = &sb->subdev; 907 - unsigned long managed_falcons = acr->base.managed_falcons; 908 - int falcon_id; 909 - int ret; 910 - 911 - if (sb->wpr_set) 912 - return 0; 913 - 914 - /* Make sure all blobs are ready */ 915 - ret = acr_r352_load_blobs(acr, sb); 916 - if (ret) 917 - return ret; 918 - 919 - nvkm_debug(subdev, "running HS load blob\n"); 920 - ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon); 921 - /* clear halt interrupt */ 922 - nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10); 923 - sb->wpr_set = acr_r352_wpr_is_set(acr, sb); 924 - if (ret < 0) { 925 - return ret; 926 - } else if (ret > 0) { 927 - nvkm_error(subdev, "HS load failed, ret 0x%08x\n", ret); 928 - return -EINVAL; 929 - } 930 - nvkm_debug(subdev, "HS load blob completed\n"); 931 - /* WPR must be set at this point */ 932 - if (!sb->wpr_set) { 933 - nvkm_error(subdev, "ACR blob completed but WPR not set!\n"); 934 - return -EINVAL; 935 - } 936 - 937 - /* Run 
LS firmwares post_run hooks */ 938 - for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { 939 - const struct acr_r352_ls_func *func = 940 - acr->func->ls_func[falcon_id]; 941 - 942 - if (func->post_run) { 943 - ret = func->post_run(&acr->base, sb); 944 - if (ret) 945 - return ret; 946 - } 947 - } 948 - 949 - return 0; 950 - } 951 - 952 - /** 953 - * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded 954 - * 955 - * Reset is done by re-executing secure boot from scratch, with lazy bootstrap 956 - * disabled. This has the effect of making all managed falcons ready-to-run. 957 - */ 958 - static int 959 - acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb, 960 - unsigned long falcon_mask) 961 - { 962 - int falcon; 963 - int ret; 964 - 965 - /* 966 - * Perform secure boot each time we are called on FECS. Since only FECS 967 - * and GPCCS are managed and started together, this ought to be safe. 968 - */ 969 - if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS))) 970 - goto end; 971 - 972 - ret = acr_r352_shutdown(acr, sb); 973 - if (ret) 974 - return ret; 975 - 976 - ret = acr_r352_bootstrap(acr, sb); 977 - if (ret) 978 - return ret; 979 - 980 - end: 981 - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { 982 - acr->falcon_state[falcon] = RESET; 983 - } 984 - return 0; 985 - } 986 - 987 - /* 988 - * acr_r352_reset() - execute secure boot from the prepared state 989 - * 990 - * Load the HS bootloader and ask the falcon to run it. This will in turn 991 - * load the HS firmware and run it, so once the falcon stops all the managed 992 - * falcons should have their LS firmware loaded and be ready to run. 
993 - */ 994 - static int 995 - acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, 996 - unsigned long falcon_mask) 997 - { 998 - struct acr_r352 *acr = acr_r352(_acr); 999 - struct nvkm_msgqueue *queue; 1000 - int falcon; 1001 - bool wpr_already_set = sb->wpr_set; 1002 - int ret; 1003 - 1004 - /* Make sure secure boot is performed */ 1005 - ret = acr_r352_bootstrap(acr, sb); 1006 - if (ret) 1007 - return ret; 1008 - 1009 - /* No PMU interface? */ 1010 - if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) { 1011 - /* Redo secure boot entirely if it was already done */ 1012 - if (wpr_already_set) 1013 - return acr_r352_reset_nopmu(acr, sb, falcon_mask); 1014 - /* Else return the result of the initial invokation */ 1015 - else 1016 - return ret; 1017 - } 1018 - 1019 - switch (_acr->boot_falcon) { 1020 - case NVKM_SECBOOT_FALCON_PMU: 1021 - queue = sb->subdev.device->pmu->queue; 1022 - break; 1023 - case NVKM_SECBOOT_FALCON_SEC2: 1024 - queue = sb->subdev.device->sec2->queue; 1025 - break; 1026 - default: 1027 - return -EINVAL; 1028 - } 1029 - 1030 - /* Otherwise just ask the LS firmware to reset the falcon */ 1031 - for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) 1032 - nvkm_debug(&sb->subdev, "resetting %s falcon\n", 1033 - nvkm_secboot_falcon_name[falcon]); 1034 - ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask); 1035 - if (ret) { 1036 - nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret); 1037 - return ret; 1038 - } 1039 - nvkm_debug(&sb->subdev, "falcon reset done\n"); 1040 - 1041 - return 0; 1042 - } 1043 - 1044 - static int 1045 - acr_r352_fini(struct nvkm_acr *_acr, struct nvkm_secboot *sb, bool suspend) 1046 - { 1047 - struct acr_r352 *acr = acr_r352(_acr); 1048 - 1049 - return acr_r352_shutdown(acr, sb); 1050 - } 1051 - 1052 - static void 1053 - acr_r352_dtor(struct nvkm_acr *_acr) 1054 - { 1055 - struct acr_r352 *acr = acr_r352(_acr); 1056 - 1057 - nvkm_gpuobj_del(&acr->unload_blob); 1058 - 1059 - if 
(_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU) 1060 - kfree(acr->hsbl_unload_blob); 1061 - kfree(acr->hsbl_blob); 1062 - nvkm_gpuobj_del(&acr->load_blob); 1063 - nvkm_gpuobj_del(&acr->ls_blob); 1064 - 1065 - kfree(acr); 1066 - } 1067 - 1068 - static const struct acr_r352_lsf_func 1069 - acr_r352_ls_fecs_func_0 = { 1070 - .generate_bl_desc = acr_r352_generate_flcn_bl_desc, 1071 - .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), 1072 - }; 1073 - 1074 - const struct acr_r352_ls_func 1075 - acr_r352_ls_fecs_func = { 1076 - .load = acr_ls_ucode_load_fecs, 1077 - .version_max = 0, 1078 - .version = { 1079 - &acr_r352_ls_fecs_func_0, 1080 - } 1081 - }; 1082 - 1083 - static const struct acr_r352_lsf_func 1084 - acr_r352_ls_gpccs_func_0 = { 1085 - .generate_bl_desc = acr_r352_generate_flcn_bl_desc, 1086 - .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), 1087 - /* GPCCS will be loaded using PRI */ 1088 - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 1089 - }; 1090 - 1091 - static const struct acr_r352_ls_func 1092 - acr_r352_ls_gpccs_func = { 1093 - .load = acr_ls_ucode_load_gpccs, 1094 - .version_max = 0, 1095 - .version = { 1096 - &acr_r352_ls_gpccs_func_0, 1097 - } 1098 - }; 1099 - 1100 - 1101 - 1102 - /** 1103 - * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor 1104 - * @dma_idx: DMA context to be used by BL while loading code/data 1105 - * @code_dma_base: 256B-aligned Physical FB Address where code is located 1106 - * @total_code_size: total size of the code part in the ucode 1107 - * @code_size_to_load: size of the code part to load in PMU IMEM. 1108 - * @code_entry_point: entry point in the code. 1109 - * @data_dma_base: Physical FB address where data part of ucode is located 1110 - * @data_size: Total size of the data portion. 1111 - * @overlay_dma_base: Physical Fb address for resident code present in ucode 1112 - * @argc: Total number of args 1113 - * @argv: offset where args are copied into PMU's DMEM. 
1114 - * 1115 - * Structure used by the PMU bootloader to load the rest of the code 1116 - */ 1117 - struct acr_r352_pmu_bl_desc { 1118 - u32 dma_idx; 1119 - u32 code_dma_base; 1120 - u32 code_size_total; 1121 - u32 code_size_to_load; 1122 - u32 code_entry_point; 1123 - u32 data_dma_base; 1124 - u32 data_size; 1125 - u32 overlay_dma_base; 1126 - u32 argc; 1127 - u32 argv; 1128 - u16 code_dma_base1; 1129 - u16 data_dma_base1; 1130 - u16 overlay_dma_base1; 1131 - }; 1132 - 1133 - /** 1134 - * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image 1135 - * 1136 - */ 1137 - static void 1138 - acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr, 1139 - const struct ls_ucode_img *img, u64 wpr_addr, 1140 - void *_desc) 1141 - { 1142 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 1143 - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; 1144 - struct acr_r352_pmu_bl_desc *desc = _desc; 1145 - u64 base; 1146 - u64 addr_code; 1147 - u64 addr_data; 1148 - u32 addr_args; 1149 - 1150 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 1151 - addr_code = (base + pdesc->app_resident_code_offset) >> 8; 1152 - addr_data = (base + pdesc->app_resident_data_offset) >> 8; 1153 - addr_args = pmu->falcon->data.limit; 1154 - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; 1155 - 1156 - desc->dma_idx = FALCON_DMAIDX_UCODE; 1157 - desc->code_dma_base = lower_32_bits(addr_code); 1158 - desc->code_dma_base1 = upper_32_bits(addr_code); 1159 - desc->code_size_total = pdesc->app_size; 1160 - desc->code_size_to_load = pdesc->app_resident_code_size; 1161 - desc->code_entry_point = pdesc->app_imem_entry; 1162 - desc->data_dma_base = lower_32_bits(addr_data); 1163 - desc->data_dma_base1 = upper_32_bits(addr_data); 1164 - desc->data_size = pdesc->app_resident_data_size; 1165 - desc->overlay_dma_base = lower_32_bits(addr_code); 1166 - desc->overlay_dma_base1 = upper_32_bits(addr_code); 1167 - desc->argc = 1; 1168 - desc->argv = addr_args; 1169 - } 
1170 - 1171 - static const struct acr_r352_lsf_func 1172 - acr_r352_ls_pmu_func_0 = { 1173 - .generate_bl_desc = acr_r352_generate_pmu_bl_desc, 1174 - .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc), 1175 - }; 1176 - 1177 - static const struct acr_r352_ls_func 1178 - acr_r352_ls_pmu_func = { 1179 - .load = acr_ls_ucode_load_pmu, 1180 - .post_run = acr_ls_pmu_post_run, 1181 - .version_max = 0, 1182 - .version = { 1183 - &acr_r352_ls_pmu_func_0, 1184 - } 1185 - }; 1186 - 1187 - const struct acr_r352_func 1188 - acr_r352_func = { 1189 - .fixup_hs_desc = acr_r352_fixup_hs_desc, 1190 - .generate_hs_bl_desc = acr_r352_generate_hs_bl_desc, 1191 - .hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc), 1192 - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, 1193 - .ls_fill_headers = acr_r352_ls_fill_headers, 1194 - .ls_write_wpr = acr_r352_ls_write_wpr, 1195 - .ls_func = { 1196 - [NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func, 1197 - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func, 1198 - [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func, 1199 - }, 1200 - }; 1201 - 1202 - static const struct nvkm_acr_func 1203 - acr_r352_base_func = { 1204 - .dtor = acr_r352_dtor, 1205 - .fini = acr_r352_fini, 1206 - .load = acr_r352_load, 1207 - .reset = acr_r352_reset, 1208 - }; 1209 - 1210 - struct nvkm_acr * 1211 - acr_r352_new_(const struct acr_r352_func *func, 1212 - enum nvkm_secboot_falcon boot_falcon, 1213 - unsigned long managed_falcons) 1214 - { 1215 - struct acr_r352 *acr; 1216 - int i; 1217 - 1218 - /* Check that all requested falcons are supported */ 1219 - for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) { 1220 - if (!func->ls_func[i]) 1221 - return ERR_PTR(-ENOTSUPP); 1222 - } 1223 - 1224 - acr = kzalloc(sizeof(*acr), GFP_KERNEL); 1225 - if (!acr) 1226 - return ERR_PTR(-ENOMEM); 1227 - 1228 - acr->base.boot_falcon = boot_falcon; 1229 - acr->base.managed_falcons = managed_falcons; 1230 - acr->base.func = &acr_r352_base_func; 1231 - acr->func = 
func; 1232 - 1233 - return &acr->base; 1234 - } 1235 - 1236 - struct nvkm_acr * 1237 - acr_r352_new(unsigned long managed_falcons) 1238 - { 1239 - return acr_r352_new_(&acr_r352_func, NVKM_SECBOOT_FALCON_PMU, 1240 - managed_falcons); 1241 - }
-167
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - #ifndef __NVKM_SECBOOT_ACR_R352_H__ 23 - #define __NVKM_SECBOOT_ACR_R352_H__ 24 - 25 - #include "acr.h" 26 - #include "ls_ucode.h" 27 - #include "hs_ucode.h" 28 - 29 - struct ls_ucode_img; 30 - 31 - #define ACR_R352_MAX_APPS 8 32 - 33 - #define LSF_FLAG_LOAD_CODE_AT_0 1 34 - #define LSF_FLAG_DMACTL_REQ_CTX 4 35 - #define LSF_FLAG_FORCE_PRIV_LOAD 8 36 - 37 - static inline u32 38 - hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app) 39 - { 40 - return hdr->apps[app]; 41 - } 42 - 43 - static inline u32 44 - hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app) 45 - { 46 - return hdr->apps[hdr->num_apps + app]; 47 - } 48 - 49 - /** 50 - * struct acr_r352_lsf_func - manages a specific LS firmware version 51 - * 52 - * @generate_bl_desc: function called on a block of bl_desc_size to generate the 53 - * proper bootloader descriptor for this LS firmware 54 - * @bl_desc_size: size of the bootloader descriptor 55 - * @lhdr_flags: LS flags 56 - */ 57 - struct acr_r352_lsf_func { 58 - void (*generate_bl_desc)(const struct nvkm_acr *, 59 - const struct ls_ucode_img *, u64, void *); 60 - u32 bl_desc_size; 61 - u32 lhdr_flags; 62 - }; 63 - 64 - /** 65 - * struct acr_r352_ls_func - manages a single LS falcon 66 - * 67 - * @load: load the external firmware into a ls_ucode_img 68 - * @post_run: hook called right after the ACR is executed 69 - */ 70 - struct acr_r352_ls_func { 71 - int (*load)(const struct nvkm_secboot *, int maxver, 72 - struct ls_ucode_img *); 73 - int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *); 74 - int version_max; 75 - const struct acr_r352_lsf_func *version[]; 76 - }; 77 - 78 - struct acr_r352; 79 - 80 - /** 81 - * struct acr_r352_func - manages nuances between ACR versions 82 - * 83 - * @generate_hs_bl_desc: function called on a block of bl_desc_size to generate 84 - * the proper HS bootloader descriptor 85 - * @hs_bl_desc_size: size of the HS bootloader descriptor 86 - */ 87 - struct 
acr_r352_func { 88 - void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *, 89 - u64); 90 - void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *); 91 - u32 hs_bl_desc_size; 92 - bool shadow_blob; 93 - 94 - struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *, 95 - const struct nvkm_secboot *, 96 - enum nvkm_secboot_falcon); 97 - int (*ls_fill_headers)(struct acr_r352 *, struct list_head *); 98 - int (*ls_write_wpr)(struct acr_r352 *, struct list_head *, 99 - struct nvkm_gpuobj *, u64); 100 - 101 - const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END]; 102 - }; 103 - 104 - /** 105 - * struct acr_r352 - ACR data for driver release 352 (and beyond) 106 - */ 107 - struct acr_r352 { 108 - struct nvkm_acr base; 109 - const struct acr_r352_func *func; 110 - 111 - /* 112 - * HS FW - lock WPR region (dGPU only) and load LS FWs 113 - * on Tegra the HS FW copies the LS blob into the fixed WPR instead 114 - */ 115 - struct nvkm_gpuobj *load_blob; 116 - struct { 117 - struct hsf_load_header load_bl_header; 118 - u32 __load_apps[ACR_R352_MAX_APPS * 2]; 119 - }; 120 - 121 - /* HS FW - unlock WPR region (dGPU only) */ 122 - struct nvkm_gpuobj *unload_blob; 123 - struct { 124 - struct hsf_load_header unload_bl_header; 125 - u32 __unload_apps[ACR_R352_MAX_APPS * 2]; 126 - }; 127 - 128 - /* HS bootloader */ 129 - void *hsbl_blob; 130 - 131 - /* HS bootloader for unload blob, if using a different falcon */ 132 - void *hsbl_unload_blob; 133 - 134 - /* LS FWs, to be loaded by the HS ACR */ 135 - struct nvkm_gpuobj *ls_blob; 136 - 137 - /* Firmware already loaded? 
*/ 138 - bool firmware_ok; 139 - 140 - /* Falcons to lazy-bootstrap */ 141 - u32 lazy_bootstrap; 142 - 143 - /* To keep track of the state of all managed falcons */ 144 - enum { 145 - /* In non-secure state, no firmware loaded, no privileges*/ 146 - NON_SECURE = 0, 147 - /* In low-secure mode and ready to be started */ 148 - RESET, 149 - /* In low-secure mode and running */ 150 - RUNNING, 151 - } falcon_state[NVKM_SECBOOT_FALCON_END]; 152 - }; 153 - #define acr_r352(acr) container_of(acr, struct acr_r352, base) 154 - 155 - struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *, 156 - enum nvkm_secboot_falcon, unsigned long); 157 - 158 - struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *, 159 - const struct nvkm_secboot *, 160 - enum nvkm_secboot_falcon); 161 - int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *); 162 - int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *, 163 - struct nvkm_gpuobj *, u64); 164 - 165 - void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); 166 - 167 - #endif
-229
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #include "acr_r361.h" 24 - 25 - #include <engine/falcon.h> 26 - #include <core/msgqueue.h> 27 - #include <subdev/pmu.h> 28 - #include <engine/sec2.h> 29 - 30 - static void 31 - acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr, 32 - const struct ls_ucode_img *img, u64 wpr_addr, 33 - void *_desc) 34 - { 35 - struct acr_r361_flcn_bl_desc *desc = _desc; 36 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 37 - u64 base, addr_code, addr_data; 38 - 39 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 40 - addr_code = base + pdesc->app_resident_code_offset; 41 - addr_data = base + pdesc->app_resident_data_offset; 42 - 43 - desc->ctx_dma = FALCON_DMAIDX_UCODE; 44 - desc->code_dma_base = u64_to_flcn64(addr_code); 45 - desc->non_sec_code_off = pdesc->app_resident_code_offset; 46 - desc->non_sec_code_size = pdesc->app_resident_code_size; 47 - desc->code_entry_point = pdesc->app_imem_entry; 48 - desc->data_dma_base = u64_to_flcn64(addr_data); 49 - desc->data_size = pdesc->app_resident_data_size; 50 - } 51 - 52 - void 53 - acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, 54 - u64 offset) 55 - { 56 - struct acr_r361_flcn_bl_desc *bl_desc = _bl_desc; 57 - 58 - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; 59 - bl_desc->code_dma_base = u64_to_flcn64(offset); 60 - bl_desc->non_sec_code_off = hdr->non_sec_code_off; 61 - bl_desc->non_sec_code_size = hdr->non_sec_code_size; 62 - bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); 63 - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); 64 - bl_desc->code_entry_point = 0; 65 - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); 66 - bl_desc->data_size = hdr->data_size; 67 - } 68 - 69 - static const struct acr_r352_lsf_func 70 - acr_r361_ls_fecs_func_0 = { 71 - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, 72 - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 73 - }; 74 - 75 - const struct acr_r352_ls_func 76 - 
acr_r361_ls_fecs_func = { 77 - .load = acr_ls_ucode_load_fecs, 78 - .version_max = 0, 79 - .version = { 80 - &acr_r361_ls_fecs_func_0, 81 - } 82 - }; 83 - 84 - static const struct acr_r352_lsf_func 85 - acr_r361_ls_gpccs_func_0 = { 86 - .generate_bl_desc = acr_r361_generate_flcn_bl_desc, 87 - .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 88 - /* GPCCS will be loaded using PRI */ 89 - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 90 - }; 91 - 92 - const struct acr_r352_ls_func 93 - acr_r361_ls_gpccs_func = { 94 - .load = acr_ls_ucode_load_gpccs, 95 - .version_max = 0, 96 - .version = { 97 - &acr_r361_ls_gpccs_func_0, 98 - } 99 - }; 100 - 101 - struct acr_r361_pmu_bl_desc { 102 - u32 reserved; 103 - u32 dma_idx; 104 - struct flcn_u64 code_dma_base; 105 - u32 total_code_size; 106 - u32 code_size_to_load; 107 - u32 code_entry_point; 108 - struct flcn_u64 data_dma_base; 109 - u32 data_size; 110 - struct flcn_u64 overlay_dma_base; 111 - u32 argc; 112 - u32 argv; 113 - }; 114 - 115 - static void 116 - acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr, 117 - const struct ls_ucode_img *img, u64 wpr_addr, 118 - void *_desc) 119 - { 120 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 121 - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; 122 - struct acr_r361_pmu_bl_desc *desc = _desc; 123 - u64 base, addr_code, addr_data; 124 - u32 addr_args; 125 - 126 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 127 - addr_code = base + pdesc->app_resident_code_offset; 128 - addr_data = base + pdesc->app_resident_data_offset; 129 - addr_args = pmu->falcon->data.limit; 130 - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; 131 - 132 - desc->dma_idx = FALCON_DMAIDX_UCODE; 133 - desc->code_dma_base = u64_to_flcn64(addr_code); 134 - desc->total_code_size = pdesc->app_size; 135 - desc->code_size_to_load = pdesc->app_resident_code_size; 136 - desc->code_entry_point = pdesc->app_imem_entry; 137 - desc->data_dma_base = u64_to_flcn64(addr_data); 138 - 
desc->data_size = pdesc->app_resident_data_size; 139 - desc->overlay_dma_base = u64_to_flcn64(addr_code); 140 - desc->argc = 1; 141 - desc->argv = addr_args; 142 - } 143 - 144 - static const struct acr_r352_lsf_func 145 - acr_r361_ls_pmu_func_0 = { 146 - .generate_bl_desc = acr_r361_generate_pmu_bl_desc, 147 - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), 148 - }; 149 - 150 - const struct acr_r352_ls_func 151 - acr_r361_ls_pmu_func = { 152 - .load = acr_ls_ucode_load_pmu, 153 - .post_run = acr_ls_pmu_post_run, 154 - .version_max = 0, 155 - .version = { 156 - &acr_r361_ls_pmu_func_0, 157 - } 158 - }; 159 - 160 - static void 161 - acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr, 162 - const struct ls_ucode_img *img, u64 wpr_addr, 163 - void *_desc) 164 - { 165 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 166 - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; 167 - struct acr_r361_pmu_bl_desc *desc = _desc; 168 - u64 base, addr_code, addr_data; 169 - u32 addr_args; 170 - 171 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 172 - /* For some reason we should not add app_resident_code_offset here */ 173 - addr_code = base; 174 - addr_data = base + pdesc->app_resident_data_offset; 175 - addr_args = sec->falcon->data.limit; 176 - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; 177 - 178 - desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE; 179 - desc->code_dma_base = u64_to_flcn64(addr_code); 180 - desc->total_code_size = pdesc->app_size; 181 - desc->code_size_to_load = pdesc->app_resident_code_size; 182 - desc->code_entry_point = pdesc->app_imem_entry; 183 - desc->data_dma_base = u64_to_flcn64(addr_data); 184 - desc->data_size = pdesc->app_resident_data_size; 185 - desc->overlay_dma_base = u64_to_flcn64(addr_code); 186 - desc->argc = 1; 187 - /* args are stored at the beginning of EMEM */ 188 - desc->argv = 0x01000000; 189 - } 190 - 191 - const struct acr_r352_lsf_func 192 - acr_r361_ls_sec2_func_0 = { 193 - .generate_bl_desc = 
acr_r361_generate_sec2_bl_desc, 194 - .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc), 195 - }; 196 - 197 - static const struct acr_r352_ls_func 198 - acr_r361_ls_sec2_func = { 199 - .load = acr_ls_ucode_load_sec2, 200 - .post_run = acr_ls_sec2_post_run, 201 - .version_max = 0, 202 - .version = { 203 - &acr_r361_ls_sec2_func_0, 204 - } 205 - }; 206 - 207 - 208 - const struct acr_r352_func 209 - acr_r361_func = { 210 - .fixup_hs_desc = acr_r352_fixup_hs_desc, 211 - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, 212 - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 213 - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, 214 - .ls_fill_headers = acr_r352_ls_fill_headers, 215 - .ls_write_wpr = acr_r352_ls_write_wpr, 216 - .ls_func = { 217 - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, 218 - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, 219 - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, 220 - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func, 221 - }, 222 - }; 223 - 224 - struct nvkm_acr * 225 - acr_r361_new(unsigned long managed_falcons) 226 - { 227 - return acr_r352_new_(&acr_r361_func, NVKM_SECBOOT_FALCON_PMU, 228 - managed_falcons); 229 - }
-71
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #ifndef __NVKM_SECBOOT_ACR_R361_H__ 24 - #define __NVKM_SECBOOT_ACR_R361_H__ 25 - 26 - #include "acr_r352.h" 27 - 28 - /** 29 - * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor 30 - * @signature: 16B signature for secure code. 0s if no secure code 31 - * @ctx_dma: DMA context to be used by BL while loading code/data 32 - * @code_dma_base: 256B-aligned Physical FB Address where code is located 33 - * (falcon's $xcbase register) 34 - * @non_sec_code_off: offset from code_dma_base where the non-secure code is 35 - * located. The offset must be multiple of 256 to help perf 36 - * @non_sec_code_size: the size of the nonSecure code part. 37 - * @sec_code_off: offset from code_dma_base where the secure code is 38 - * located. 
The offset must be multiple of 256 to help perf 39 - * @sec_code_size: offset from code_dma_base where the secure code is 40 - * located. The offset must be multiple of 256 to help perf 41 - * @code_entry_point: code entry point which will be invoked by BL after 42 - * code is loaded. 43 - * @data_dma_base: 256B aligned Physical FB Address where data is located. 44 - * (falcon's $xdbase register) 45 - * @data_size: size of data block. Should be multiple of 256B 46 - * 47 - * Structure used by the bootloader to load the rest of the code. This has 48 - * to be filled by host and copied into DMEM at offset provided in the 49 - * hsflcn_bl_desc.bl_desc_dmem_load_off. 50 - */ 51 - struct acr_r361_flcn_bl_desc { 52 - u32 reserved[4]; 53 - u32 signature[4]; 54 - u32 ctx_dma; 55 - struct flcn_u64 code_dma_base; 56 - u32 non_sec_code_off; 57 - u32 non_sec_code_size; 58 - u32 sec_code_off; 59 - u32 sec_code_size; 60 - u32 code_entry_point; 61 - struct flcn_u64 data_dma_base; 62 - u32 data_size; 63 - }; 64 - 65 - void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); 66 - 67 - extern const struct acr_r352_ls_func acr_r361_ls_fecs_func; 68 - extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func; 69 - extern const struct acr_r352_ls_func acr_r361_ls_pmu_func; 70 - extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0; 71 - #endif
-117
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #include "acr_r361.h" 24 - 25 - #include <core/gpuobj.h> 26 - 27 - /* 28 - * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem 29 - * parameter. 
30 - */ 31 - 32 - struct acr_r364_hsflcn_desc { 33 - union { 34 - u8 reserved_dmem[0x200]; 35 - u32 signatures[4]; 36 - } ucode_reserved_space; 37 - u32 wpr_region_id; 38 - u32 wpr_offset; 39 - u32 mmu_memory_range; 40 - struct { 41 - u32 no_regions; 42 - struct { 43 - u32 start_addr; 44 - u32 end_addr; 45 - u32 region_id; 46 - u32 read_mask; 47 - u32 write_mask; 48 - u32 client_mask; 49 - u32 shadow_mem_start_addr; 50 - } region_props[2]; 51 - } regions; 52 - u32 ucode_blob_size; 53 - u64 ucode_blob_base __aligned(8); 54 - struct { 55 - u32 vpr_enabled; 56 - u32 vpr_start; 57 - u32 vpr_end; 58 - u32 hdcp_policies; 59 - } vpr_desc; 60 - }; 61 - 62 - static void 63 - acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, 64 - void *_desc) 65 - { 66 - struct acr_r364_hsflcn_desc *desc = _desc; 67 - struct nvkm_gpuobj *ls_blob = acr->ls_blob; 68 - 69 - /* WPR region information if WPR is not fixed */ 70 - if (sb->wpr_size == 0) { 71 - u64 wpr_start = ls_blob->addr; 72 - u64 wpr_end = ls_blob->addr + ls_blob->size; 73 - 74 - if (acr->func->shadow_blob) 75 - wpr_start += ls_blob->size / 2; 76 - 77 - desc->wpr_region_id = 1; 78 - desc->regions.no_regions = 2; 79 - desc->regions.region_props[0].start_addr = wpr_start >> 8; 80 - desc->regions.region_props[0].end_addr = wpr_end >> 8; 81 - desc->regions.region_props[0].region_id = 1; 82 - desc->regions.region_props[0].read_mask = 0xf; 83 - desc->regions.region_props[0].write_mask = 0xc; 84 - desc->regions.region_props[0].client_mask = 0x2; 85 - if (acr->func->shadow_blob) 86 - desc->regions.region_props[0].shadow_mem_start_addr = 87 - ls_blob->addr >> 8; 88 - else 89 - desc->regions.region_props[0].shadow_mem_start_addr = 0; 90 - } else { 91 - desc->ucode_blob_base = ls_blob->addr; 92 - desc->ucode_blob_size = ls_blob->size; 93 - } 94 - } 95 - 96 - const struct acr_r352_func 97 - acr_r364_func = { 98 - .fixup_hs_desc = acr_r364_fixup_hs_desc, 99 - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, 100 - 
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 101 - .ls_ucode_img_load = acr_r352_ls_ucode_img_load, 102 - .ls_fill_headers = acr_r352_ls_fill_headers, 103 - .ls_write_wpr = acr_r352_ls_write_wpr, 104 - .ls_func = { 105 - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, 106 - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, 107 - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, 108 - }, 109 - }; 110 - 111 - 112 - struct nvkm_acr * 113 - acr_r364_new(unsigned long managed_falcons) 114 - { 115 - return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU, 116 - managed_falcons); 117 - }
-418
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #include "acr_r367.h" 24 - #include "acr_r361.h" 25 - #include "acr_r370.h" 26 - 27 - #include <core/gpuobj.h> 28 - 29 - /* 30 - * r367 ACR: new LS signature format requires a rewrite of LS firmware and 31 - * blob creation functions. Also the hsflcn_desc layout has changed slightly. 32 - */ 33 - 34 - #define LSF_LSB_DEPMAP_SIZE 11 35 - 36 - /** 37 - * struct acr_r367_lsf_lsb_header - LS firmware header 38 - * 39 - * See also struct acr_r352_lsf_lsb_header for documentation. 
40 - */ 41 - struct acr_r367_lsf_lsb_header { 42 - /** 43 - * LS falcon signatures 44 - * @prd_keys: signature to use in production mode 45 - * @dgb_keys: signature to use in debug mode 46 - * @b_prd_present: whether the production key is present 47 - * @b_dgb_present: whether the debug key is present 48 - * @falcon_id: ID of the falcon the ucode applies to 49 - */ 50 - struct { 51 - u8 prd_keys[2][16]; 52 - u8 dbg_keys[2][16]; 53 - u32 b_prd_present; 54 - u32 b_dbg_present; 55 - u32 falcon_id; 56 - u32 supports_versioning; 57 - u32 version; 58 - u32 depmap_count; 59 - u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4]; 60 - u8 kdf[16]; 61 - } signature; 62 - u32 ucode_off; 63 - u32 ucode_size; 64 - u32 data_size; 65 - u32 bl_code_size; 66 - u32 bl_imem_off; 67 - u32 bl_data_off; 68 - u32 bl_data_size; 69 - u32 app_code_off; 70 - u32 app_code_size; 71 - u32 app_data_off; 72 - u32 app_data_size; 73 - u32 flags; 74 - }; 75 - 76 - /** 77 - * struct acr_r367_lsf_wpr_header - LS blob WPR Header 78 - * 79 - * See also struct acr_r352_lsf_wpr_header for documentation. 
80 - */ 81 - struct acr_r367_lsf_wpr_header { 82 - u32 falcon_id; 83 - u32 lsb_offset; 84 - u32 bootstrap_owner; 85 - u32 lazy_bootstrap; 86 - u32 bin_version; 87 - u32 status; 88 - #define LSF_IMAGE_STATUS_NONE 0 89 - #define LSF_IMAGE_STATUS_COPY 1 90 - #define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2 91 - #define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3 92 - #define LSF_IMAGE_STATUS_VALIDATION_DONE 4 93 - #define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5 94 - #define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6 95 - #define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7 96 - }; 97 - 98 - /** 99 - * struct ls_ucode_img_r367 - ucode image augmented with r367 headers 100 - */ 101 - struct ls_ucode_img_r367 { 102 - struct ls_ucode_img base; 103 - 104 - const struct acr_r352_lsf_func *func; 105 - 106 - struct acr_r367_lsf_wpr_header wpr_header; 107 - struct acr_r367_lsf_lsb_header lsb_header; 108 - }; 109 - #define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base) 110 - 111 - struct ls_ucode_img * 112 - acr_r367_ls_ucode_img_load(const struct acr_r352 *acr, 113 - const struct nvkm_secboot *sb, 114 - enum nvkm_secboot_falcon falcon_id) 115 - { 116 - const struct nvkm_subdev *subdev = acr->base.subdev; 117 - const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; 118 - struct ls_ucode_img_r367 *img; 119 - int ret; 120 - 121 - img = kzalloc(sizeof(*img), GFP_KERNEL); 122 - if (!img) 123 - return ERR_PTR(-ENOMEM); 124 - 125 - img->base.falcon_id = falcon_id; 126 - 127 - ret = func->load(sb, func->version_max, &img->base); 128 - if (ret < 0) { 129 - kfree(img->base.ucode_data); 130 - kfree(img->base.sig); 131 - kfree(img); 132 - return ERR_PTR(ret); 133 - } 134 - 135 - img->func = func->version[ret]; 136 - 137 - /* Check that the signature size matches our expectations... 
*/ 138 - if (img->base.sig_size != sizeof(img->lsb_header.signature)) { 139 - nvkm_error(subdev, "invalid signature size for %s falcon!\n", 140 - nvkm_secboot_falcon_name[falcon_id]); 141 - return ERR_PTR(-EINVAL); 142 - } 143 - 144 - /* Copy signature to the right place */ 145 - memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size); 146 - 147 - /* not needed? the signature should already have the right value */ 148 - img->lsb_header.signature.falcon_id = falcon_id; 149 - 150 - return &img->base; 151 - } 152 - 153 - #define LSF_LSB_HEADER_ALIGN 256 154 - #define LSF_BL_DATA_ALIGN 256 155 - #define LSF_BL_DATA_SIZE_ALIGN 256 156 - #define LSF_BL_CODE_SIZE_ALIGN 256 157 - #define LSF_UCODE_DATA_ALIGN 4096 158 - 159 - static u32 160 - acr_r367_ls_img_fill_headers(struct acr_r352 *acr, 161 - struct ls_ucode_img_r367 *img, u32 offset) 162 - { 163 - struct ls_ucode_img *_img = &img->base; 164 - struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header; 165 - struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header; 166 - struct ls_ucode_img_desc *desc = &_img->ucode_desc; 167 - const struct acr_r352_lsf_func *func = img->func; 168 - 169 - /* Fill WPR header */ 170 - whdr->falcon_id = _img->falcon_id; 171 - whdr->bootstrap_owner = acr->base.boot_falcon; 172 - whdr->bin_version = lhdr->signature.version; 173 - whdr->status = LSF_IMAGE_STATUS_COPY; 174 - 175 - /* Skip bootstrapping falcons started by someone else than ACR */ 176 - if (acr->lazy_bootstrap & BIT(_img->falcon_id)) 177 - whdr->lazy_bootstrap = 1; 178 - 179 - /* Align, save off, and include an LSB header size */ 180 - offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN); 181 - whdr->lsb_offset = offset; 182 - offset += sizeof(*lhdr); 183 - 184 - /* 185 - * Align, save off, and include the original (static) ucode 186 - * image size 187 - */ 188 - offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN); 189 - _img->ucode_off = lhdr->ucode_off = offset; 190 - offset += _img->ucode_size; 191 - 192 - /* 193 - * For falcons 
that use a boot loader (BL), we append a loader 194 - * desc structure on the end of the ucode image and consider 195 - * this the boot loader data. The host will then copy the loader 196 - * desc args to this space within the WPR region (before locking 197 - * down) and the HS bin will then copy them to DMEM 0 for the 198 - * loader. 199 - */ 200 - lhdr->bl_code_size = ALIGN(desc->bootloader_size, 201 - LSF_BL_CODE_SIZE_ALIGN); 202 - lhdr->ucode_size = ALIGN(desc->app_resident_data_offset, 203 - LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size; 204 - lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) + 205 - lhdr->bl_code_size - lhdr->ucode_size; 206 - /* 207 - * Though the BL is located at 0th offset of the image, the VA 208 - * is different to make sure that it doesn't collide the actual 209 - * OS VA range 210 - */ 211 - lhdr->bl_imem_off = desc->bootloader_imem_offset; 212 - lhdr->app_code_off = desc->app_start_offset + 213 - desc->app_resident_code_offset; 214 - lhdr->app_code_size = desc->app_resident_code_size; 215 - lhdr->app_data_off = desc->app_start_offset + 216 - desc->app_resident_data_offset; 217 - lhdr->app_data_size = desc->app_resident_data_size; 218 - 219 - lhdr->flags = func->lhdr_flags; 220 - if (_img->falcon_id == acr->base.boot_falcon) 221 - lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX; 222 - 223 - /* Align and save off BL descriptor size */ 224 - lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN); 225 - 226 - /* 227 - * Align, save off, and include the additional BL data 228 - */ 229 - offset = ALIGN(offset, LSF_BL_DATA_ALIGN); 230 - lhdr->bl_data_off = offset; 231 - offset += lhdr->bl_data_size; 232 - 233 - return offset; 234 - } 235 - 236 - int 237 - acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs) 238 - { 239 - struct ls_ucode_img_r367 *img; 240 - struct list_head *l; 241 - u32 count = 0; 242 - u32 offset; 243 - 244 - /* Count the number of images to manage */ 245 - list_for_each(l, imgs) 246 - 
count++; 247 - 248 - /* 249 - * Start with an array of WPR headers at the base of the WPR. 250 - * The expectation here is that the secure falcon will do a single DMA 251 - * read of this array and cache it internally so it's ok to pack these. 252 - * Also, we add 1 to the falcon count to indicate the end of the array. 253 - */ 254 - offset = sizeof(img->wpr_header) * (count + 1); 255 - 256 - /* 257 - * Walk the managed falcons, accounting for the LSB structs 258 - * as well as the ucode images. 259 - */ 260 - list_for_each_entry(img, imgs, base.node) { 261 - offset = acr_r367_ls_img_fill_headers(acr, img, offset); 262 - } 263 - 264 - return offset; 265 - } 266 - 267 - int 268 - acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, 269 - struct nvkm_gpuobj *wpr_blob, u64 wpr_addr) 270 - { 271 - struct ls_ucode_img *_img; 272 - u32 pos = 0; 273 - u32 max_desc_size = 0; 274 - u8 *gdesc; 275 - 276 - list_for_each_entry(_img, imgs, node) { 277 - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); 278 - const struct acr_r352_lsf_func *ls_func = img->func; 279 - 280 - max_desc_size = max(max_desc_size, ls_func->bl_desc_size); 281 - } 282 - 283 - gdesc = kmalloc(max_desc_size, GFP_KERNEL); 284 - if (!gdesc) 285 - return -ENOMEM; 286 - 287 - nvkm_kmap(wpr_blob); 288 - 289 - list_for_each_entry(_img, imgs, node) { 290 - struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img); 291 - const struct acr_r352_lsf_func *ls_func = img->func; 292 - 293 - nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header, 294 - sizeof(img->wpr_header)); 295 - 296 - nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset, 297 - &img->lsb_header, sizeof(img->lsb_header)); 298 - 299 - /* Generate and write BL descriptor */ 300 - memset(gdesc, 0, ls_func->bl_desc_size); 301 - ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc); 302 - 303 - nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off, 304 - gdesc, ls_func->bl_desc_size); 305 - 306 - /* Copy ucode */ 307 - 
nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off, 308 - _img->ucode_data, _img->ucode_size); 309 - 310 - pos += sizeof(img->wpr_header); 311 - } 312 - 313 - nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID); 314 - 315 - nvkm_done(wpr_blob); 316 - 317 - kfree(gdesc); 318 - 319 - return 0; 320 - } 321 - 322 - struct acr_r367_hsflcn_desc { 323 - u8 reserved_dmem[0x200]; 324 - u32 signatures[4]; 325 - u32 wpr_region_id; 326 - u32 wpr_offset; 327 - u32 mmu_memory_range; 328 - #define FLCN_ACR_MAX_REGIONS 2 329 - struct { 330 - u32 no_regions; 331 - struct { 332 - u32 start_addr; 333 - u32 end_addr; 334 - u32 region_id; 335 - u32 read_mask; 336 - u32 write_mask; 337 - u32 client_mask; 338 - u32 shadow_mem_start_addr; 339 - } region_props[FLCN_ACR_MAX_REGIONS]; 340 - } regions; 341 - u32 ucode_blob_size; 342 - u64 ucode_blob_base __aligned(8); 343 - struct { 344 - u32 vpr_enabled; 345 - u32 vpr_start; 346 - u32 vpr_end; 347 - u32 hdcp_policies; 348 - } vpr_desc; 349 - }; 350 - 351 - void 352 - acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb, 353 - void *_desc) 354 - { 355 - struct acr_r367_hsflcn_desc *desc = _desc; 356 - struct nvkm_gpuobj *ls_blob = acr->ls_blob; 357 - 358 - /* WPR region information if WPR is not fixed */ 359 - if (sb->wpr_size == 0) { 360 - u64 wpr_start = ls_blob->addr; 361 - u64 wpr_end = ls_blob->addr + ls_blob->size; 362 - 363 - if (acr->func->shadow_blob) 364 - wpr_start += ls_blob->size / 2; 365 - 366 - desc->wpr_region_id = 1; 367 - desc->regions.no_regions = 2; 368 - desc->regions.region_props[0].start_addr = wpr_start >> 8; 369 - desc->regions.region_props[0].end_addr = wpr_end >> 8; 370 - desc->regions.region_props[0].region_id = 1; 371 - desc->regions.region_props[0].read_mask = 0xf; 372 - desc->regions.region_props[0].write_mask = 0xc; 373 - desc->regions.region_props[0].client_mask = 0x2; 374 - if (acr->func->shadow_blob) 375 - desc->regions.region_props[0].shadow_mem_start_addr = 376 - ls_blob->addr >> 
8; 377 - else 378 - desc->regions.region_props[0].shadow_mem_start_addr = 0; 379 - } else { 380 - desc->ucode_blob_base = ls_blob->addr; 381 - desc->ucode_blob_size = ls_blob->size; 382 - } 383 - } 384 - 385 - static const struct acr_r352_ls_func 386 - acr_r367_ls_sec2_func = { 387 - .load = acr_ls_ucode_load_sec2, 388 - .post_run = acr_ls_sec2_post_run, 389 - .version_max = 1, 390 - .version = { 391 - &acr_r361_ls_sec2_func_0, 392 - &acr_r370_ls_sec2_func_0, 393 - } 394 - }; 395 - 396 - const struct acr_r352_func 397 - acr_r367_func = { 398 - .fixup_hs_desc = acr_r367_fixup_hs_desc, 399 - .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc, 400 - .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc), 401 - .shadow_blob = true, 402 - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, 403 - .ls_fill_headers = acr_r367_ls_fill_headers, 404 - .ls_write_wpr = acr_r367_ls_write_wpr, 405 - .ls_func = { 406 - [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func, 407 - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func, 408 - [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func, 409 - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func, 410 - }, 411 - }; 412 - 413 - struct nvkm_acr * 414 - acr_r367_new(enum nvkm_secboot_falcon boot_falcon, 415 - unsigned long managed_falcons) 416 - { 417 - return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons); 418 - }
-36
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #ifndef __NVKM_SECBOOT_ACR_R367_H__ 24 - #define __NVKM_SECBOOT_ACR_R367_H__ 25 - 26 - #include "acr_r352.h" 27 - 28 - void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); 29 - 30 - struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *, 31 - const struct nvkm_secboot *, 32 - enum nvkm_secboot_falcon); 33 - int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *); 34 - int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *, 35 - struct nvkm_gpuobj *, u64); 36 - #endif
-168
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #include "acr_r370.h" 24 - #include "acr_r367.h" 25 - 26 - #include <core/msgqueue.h> 27 - #include <engine/falcon.h> 28 - #include <engine/sec2.h> 29 - 30 - static void 31 - acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr, 32 - const struct ls_ucode_img *img, u64 wpr_addr, 33 - void *_desc) 34 - { 35 - struct acr_r370_flcn_bl_desc *desc = _desc; 36 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 37 - u64 base, addr_code, addr_data; 38 - 39 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 40 - addr_code = base + pdesc->app_resident_code_offset; 41 - addr_data = base + pdesc->app_resident_data_offset; 42 - 43 - desc->ctx_dma = FALCON_DMAIDX_UCODE; 44 - desc->code_dma_base = u64_to_flcn64(addr_code); 45 - desc->non_sec_code_off = pdesc->app_resident_code_offset; 46 - desc->non_sec_code_size = pdesc->app_resident_code_size; 47 - desc->code_entry_point = pdesc->app_imem_entry; 48 - desc->data_dma_base = u64_to_flcn64(addr_data); 49 - desc->data_size = pdesc->app_resident_data_size; 50 - } 51 - 52 - static const struct acr_r352_lsf_func 53 - acr_r370_ls_fecs_func_0 = { 54 - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, 55 - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 56 - }; 57 - 58 - const struct acr_r352_ls_func 59 - acr_r370_ls_fecs_func = { 60 - .load = acr_ls_ucode_load_fecs, 61 - .version_max = 0, 62 - .version = { 63 - &acr_r370_ls_fecs_func_0, 64 - } 65 - }; 66 - 67 - static const struct acr_r352_lsf_func 68 - acr_r370_ls_gpccs_func_0 = { 69 - .generate_bl_desc = acr_r370_generate_flcn_bl_desc, 70 - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 71 - /* GPCCS will be loaded using PRI */ 72 - .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD, 73 - }; 74 - 75 - const struct acr_r352_ls_func 76 - acr_r370_ls_gpccs_func = { 77 - .load = acr_ls_ucode_load_gpccs, 78 - .version_max = 0, 79 - .version = { 80 - &acr_r370_ls_gpccs_func_0, 81 - } 82 - }; 83 - 84 - static void 85 - 
acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr, 86 - const struct ls_ucode_img *img, u64 wpr_addr, 87 - void *_desc) 88 - { 89 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 90 - const struct nvkm_sec2 *sec = acr->subdev->device->sec2; 91 - struct acr_r370_flcn_bl_desc *desc = _desc; 92 - u64 base, addr_code, addr_data; 93 - u32 addr_args; 94 - 95 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 96 - /* For some reason we should not add app_resident_code_offset here */ 97 - addr_code = base; 98 - addr_data = base + pdesc->app_resident_data_offset; 99 - addr_args = sec->falcon->data.limit; 100 - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; 101 - 102 - desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE; 103 - desc->code_dma_base = u64_to_flcn64(addr_code); 104 - desc->non_sec_code_off = pdesc->app_resident_code_offset; 105 - desc->non_sec_code_size = pdesc->app_resident_code_size; 106 - desc->code_entry_point = pdesc->app_imem_entry; 107 - desc->data_dma_base = u64_to_flcn64(addr_data); 108 - desc->data_size = pdesc->app_resident_data_size; 109 - desc->argc = 1; 110 - /* args are stored at the beginning of EMEM */ 111 - desc->argv = 0x01000000; 112 - } 113 - 114 - const struct acr_r352_lsf_func 115 - acr_r370_ls_sec2_func_0 = { 116 - .generate_bl_desc = acr_r370_generate_sec2_bl_desc, 117 - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 118 - }; 119 - 120 - const struct acr_r352_ls_func 121 - acr_r370_ls_sec2_func = { 122 - .load = acr_ls_ucode_load_sec2, 123 - .post_run = acr_ls_sec2_post_run, 124 - .version_max = 0, 125 - .version = { 126 - &acr_r370_ls_sec2_func_0, 127 - } 128 - }; 129 - 130 - void 131 - acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc, 132 - u64 offset) 133 - { 134 - struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc; 135 - 136 - bl_desc->ctx_dma = FALCON_DMAIDX_VIRT; 137 - bl_desc->non_sec_code_off = hdr->non_sec_code_off; 138 - bl_desc->non_sec_code_size = hdr->non_sec_code_size; 139 - 
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0); 140 - bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0); 141 - bl_desc->code_entry_point = 0; 142 - bl_desc->code_dma_base = u64_to_flcn64(offset); 143 - bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base); 144 - bl_desc->data_size = hdr->data_size; 145 - } 146 - 147 - const struct acr_r352_func 148 - acr_r370_func = { 149 - .fixup_hs_desc = acr_r367_fixup_hs_desc, 150 - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, 151 - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 152 - .shadow_blob = true, 153 - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, 154 - .ls_fill_headers = acr_r367_ls_fill_headers, 155 - .ls_write_wpr = acr_r367_ls_write_wpr, 156 - .ls_func = { 157 - [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func, 158 - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, 159 - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, 160 - }, 161 - }; 162 - 163 - struct nvkm_acr * 164 - acr_r370_new(enum nvkm_secboot_falcon boot_falcon, 165 - unsigned long managed_falcons) 166 - { 167 - return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons); 168 - }
-50
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #ifndef __NVKM_SECBOOT_ACR_R370_H__ 24 - #define __NVKM_SECBOOT_ACR_R370_H__ 25 - 26 - #include "priv.h" 27 - struct hsf_load_header; 28 - 29 - /* Same as acr_r361_flcn_bl_desc, plus argc/argv */ 30 - struct acr_r370_flcn_bl_desc { 31 - u32 reserved[4]; 32 - u32 signature[4]; 33 - u32 ctx_dma; 34 - struct flcn_u64 code_dma_base; 35 - u32 non_sec_code_off; 36 - u32 non_sec_code_size; 37 - u32 sec_code_off; 38 - u32 sec_code_size; 39 - u32 code_entry_point; 40 - struct flcn_u64 data_dma_base; 41 - u32 data_size; 42 - u32 argc; 43 - u32 argv; 44 - }; 45 - 46 - void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64); 47 - extern const struct acr_r352_ls_func acr_r370_ls_fecs_func; 48 - extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func; 49 - extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0; 50 - #endif
-94
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #include "acr_r370.h" 24 - #include "acr_r367.h" 25 - 26 - #include <core/msgqueue.h> 27 - #include <subdev/pmu.h> 28 - 29 - static void 30 - acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr, 31 - const struct ls_ucode_img *img, u64 wpr_addr, 32 - void *_desc) 33 - { 34 - const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 35 - const struct nvkm_pmu *pmu = acr->subdev->device->pmu; 36 - struct acr_r370_flcn_bl_desc *desc = _desc; 37 - u64 base, addr_code, addr_data; 38 - u32 addr_args; 39 - 40 - base = wpr_addr + img->ucode_off + pdesc->app_start_offset; 41 - addr_code = base + pdesc->app_resident_code_offset; 42 - addr_data = base + pdesc->app_resident_data_offset; 43 - addr_args = pmu->falcon->data.limit; 44 - addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE; 45 - 46 - desc->ctx_dma = FALCON_DMAIDX_UCODE; 47 - desc->code_dma_base = u64_to_flcn64(addr_code); 48 - desc->non_sec_code_off = pdesc->app_resident_code_offset; 49 - desc->non_sec_code_size = pdesc->app_resident_code_size; 50 - desc->code_entry_point = pdesc->app_imem_entry; 51 - desc->data_dma_base = u64_to_flcn64(addr_data); 52 - desc->data_size = pdesc->app_resident_data_size; 53 - desc->argc = 1; 54 - desc->argv = addr_args; 55 - } 56 - 57 - static const struct acr_r352_lsf_func 58 - acr_r375_ls_pmu_func_0 = { 59 - .generate_bl_desc = acr_r375_generate_pmu_bl_desc, 60 - .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 61 - }; 62 - 63 - const struct acr_r352_ls_func 64 - acr_r375_ls_pmu_func = { 65 - .load = acr_ls_ucode_load_pmu, 66 - .post_run = acr_ls_pmu_post_run, 67 - .version_max = 0, 68 - .version = { 69 - &acr_r375_ls_pmu_func_0, 70 - } 71 - }; 72 - 73 - const struct acr_r352_func 74 - acr_r375_func = { 75 - .fixup_hs_desc = acr_r367_fixup_hs_desc, 76 - .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc, 77 - .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc), 78 - .shadow_blob = true, 79 - .ls_ucode_img_load = acr_r367_ls_ucode_img_load, 80 - .ls_fill_headers = 
acr_r367_ls_fill_headers, 81 - .ls_write_wpr = acr_r367_ls_write_wpr, 82 - .ls_func = { 83 - [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func, 84 - [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func, 85 - [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func, 86 - }, 87 - }; 88 - 89 - struct nvkm_acr * 90 - acr_r375_new(enum nvkm_secboot_falcon boot_falcon, 91 - unsigned long managed_falcons) 92 - { 93 - return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons); 94 - }
-213
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - /* 24 - * Secure boot is the process by which NVIDIA-signed firmware is loaded into 25 - * some of the falcons of a GPU. For production devices this is the only way 26 - * for the firmware to access useful (but sensitive) registers. 27 - * 28 - * A Falcon microprocessor supporting advanced security modes can run in one of 29 - * three modes: 30 - * 31 - * - Non-secure (NS). In this mode, functionality is similar to Falcon 32 - * architectures before security modes were introduced (pre-Maxwell), but 33 - * capability is restricted. In particular, certain registers may be 34 - * inaccessible for reads and/or writes, and physical memory access may be 35 - * disabled (on certain Falcon instances). 
This is the only possible mode that 36 - * can be used if you don't have microcode cryptographically signed by NVIDIA. 37 - * 38 - * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's 39 - * not possible to read or write any Falcon internal state or Falcon registers 40 - * from outside the Falcon (for example, from the host system). The only way 41 - * to enable this mode is by loading microcode that has been signed by NVIDIA. 42 - * (The loading process involves tagging the IMEM block as secure, writing the 43 - * signature into a Falcon register, and starting execution. The hardware will 44 - * validate the signature, and if valid, grant HS privileges.) 45 - * 46 - * - Light Secure (LS). In this mode, the microprocessor has more privileges 47 - * than NS but fewer than HS. Some of the microprocessor state is visible to 48 - * host software to ease debugging. The only way to enable this mode is by HS 49 - * microcode enabling LS mode. Some privileges available to HS mode are not 50 - * available here. LS mode is introduced in GM20x. 51 - * 52 - * Secure boot consists in temporarily switching a HS-capable falcon (typically 53 - * PMU) into HS mode in order to validate the LS firmwares of managed falcons, 54 - * load them, and switch managed falcons into LS mode. Once secure boot 55 - * completes, no falcon remains in HS mode. 56 - * 57 - * Secure boot requires a write-protected memory region (WPR) which can only be 58 - * written by the secure falcon. On dGPU, the driver sets up the WPR region in 59 - * video memory. On Tegra, it is set up by the bootloader and its location and 60 - * size written into memory controller registers. 61 - * 62 - * The secure boot process takes place as follows: 63 - * 64 - * 1) A LS blob is constructed that contains all the LS firmwares we want to 65 - * load, along with their signatures and bootloaders. 
66 - * 67 - * 2) A HS blob (also called ACR) is created that contains the signed HS 68 - * firmware in charge of loading the LS firmwares into their respective 69 - * falcons. 70 - * 71 - * 3) The HS blob is loaded (via its own bootloader) and executed on the 72 - * HS-capable falcon. It authenticates itself, switches the secure falcon to 73 - * HS mode and setup the WPR region around the LS blob (dGPU) or copies the 74 - * LS blob into the WPR region (Tegra). 75 - * 76 - * 4) The LS blob is now secure from all external tampering. The HS falcon 77 - * checks the signatures of the LS firmwares and, if valid, switches the 78 - * managed falcons to LS mode and makes them ready to run the LS firmware. 79 - * 80 - * 5) The managed falcons remain in LS mode and can be started. 81 - * 82 - */ 83 - 84 - #include "priv.h" 85 - #include "acr.h" 86 - 87 - #include <subdev/mc.h> 88 - #include <subdev/timer.h> 89 - #include <subdev/pmu.h> 90 - #include <engine/sec2.h> 91 - 92 - const char * 93 - nvkm_secboot_falcon_name[] = { 94 - [NVKM_SECBOOT_FALCON_PMU] = "PMU", 95 - [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>", 96 - [NVKM_SECBOOT_FALCON_FECS] = "FECS", 97 - [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS", 98 - [NVKM_SECBOOT_FALCON_SEC2] = "SEC2", 99 - [NVKM_SECBOOT_FALCON_END] = "<invalid>", 100 - }; 101 - /** 102 - * nvkm_secboot_reset() - reset specified falcon 103 - */ 104 - int 105 - nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask) 106 - { 107 - /* Unmanaged falcon? 
*/ 108 - if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) { 109 - nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); 110 - return -EINVAL; 111 - } 112 - 113 - return sb->acr->func->reset(sb->acr, sb, falcon_mask); 114 - } 115 - 116 - /** 117 - * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed 118 - */ 119 - bool 120 - nvkm_secboot_is_managed(struct nvkm_secboot *sb, enum nvkm_secboot_falcon fid) 121 - { 122 - if (!sb) 123 - return false; 124 - 125 - return sb->acr->managed_falcons & BIT(fid); 126 - } 127 - 128 - static int 129 - nvkm_secboot_oneinit(struct nvkm_subdev *subdev) 130 - { 131 - struct nvkm_secboot *sb = nvkm_secboot(subdev); 132 - int ret = 0; 133 - 134 - switch (sb->acr->boot_falcon) { 135 - case NVKM_SECBOOT_FALCON_PMU: 136 - sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon; 137 - break; 138 - case NVKM_SECBOOT_FALCON_SEC2: 139 - /* we must keep SEC2 alive forever since ACR will run on it */ 140 - nvkm_engine_ref(&subdev->device->sec2->engine); 141 - sb->boot_falcon = subdev->device->sec2->falcon; 142 - sb->halt_falcon = subdev->device->pmu->falcon; 143 - break; 144 - default: 145 - nvkm_error(subdev, "Unmanaged boot falcon %s!\n", 146 - nvkm_secboot_falcon_name[sb->acr->boot_falcon]); 147 - return -EINVAL; 148 - } 149 - nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name); 150 - 151 - /* Call chip-specific init function */ 152 - if (sb->func->oneinit) 153 - ret = sb->func->oneinit(sb); 154 - if (ret) { 155 - nvkm_error(subdev, "Secure Boot initialization failed: %d\n", 156 - ret); 157 - return ret; 158 - } 159 - 160 - return 0; 161 - } 162 - 163 - static int 164 - nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend) 165 - { 166 - struct nvkm_secboot *sb = nvkm_secboot(subdev); 167 - int ret = 0; 168 - 169 - if (sb->func->fini) 170 - ret = sb->func->fini(sb, suspend); 171 - 172 - return ret; 173 - } 174 - 175 - static void * 176 - 
nvkm_secboot_dtor(struct nvkm_subdev *subdev) 177 - { 178 - struct nvkm_secboot *sb = nvkm_secboot(subdev); 179 - void *ret = NULL; 180 - 181 - if (sb->func->dtor) 182 - ret = sb->func->dtor(sb); 183 - 184 - return ret; 185 - } 186 - 187 - static const struct nvkm_subdev_func 188 - nvkm_secboot = { 189 - .oneinit = nvkm_secboot_oneinit, 190 - .fini = nvkm_secboot_fini, 191 - .dtor = nvkm_secboot_dtor, 192 - }; 193 - 194 - int 195 - nvkm_secboot_ctor(const struct nvkm_secboot_func *func, struct nvkm_acr *acr, 196 - struct nvkm_device *device, int index, 197 - struct nvkm_secboot *sb) 198 - { 199 - unsigned long fid; 200 - 201 - nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev); 202 - sb->func = func; 203 - sb->acr = acr; 204 - acr->subdev = &sb->subdev; 205 - 206 - nvkm_debug(&sb->subdev, "securely managed falcons:\n"); 207 - for_each_set_bit(fid, &sb->acr->managed_falcons, 208 - NVKM_SECBOOT_FALCON_END) 209 - nvkm_debug(&sb->subdev, "- %s\n", 210 - nvkm_secboot_falcon_name[fid]); 211 - 212 - return 0; 213 - }
-262
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - 24 - #include "acr.h" 25 - #include "gm200.h" 26 - 27 - #include <core/gpuobj.h> 28 - #include <subdev/fb.h> 29 - #include <engine/falcon.h> 30 - #include <subdev/mc.h> 31 - 32 - /** 33 - * gm200_secboot_run_blob() - run the given high-secure blob 34 - * 35 - */ 36 - int 37 - gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, 38 - struct nvkm_falcon *falcon) 39 - { 40 - struct gm200_secboot *gsb = gm200_secboot(sb); 41 - struct nvkm_subdev *subdev = &gsb->base.subdev; 42 - struct nvkm_vma *vma = NULL; 43 - u32 start_address; 44 - int ret; 45 - 46 - ret = nvkm_falcon_get(falcon, subdev); 47 - if (ret) 48 - return ret; 49 - 50 - /* Map the HS firmware so the HS bootloader can see it */ 51 - ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma); 52 - if (ret) { 53 - nvkm_falcon_put(falcon, subdev); 54 - return ret; 55 - } 56 - 57 - ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0); 58 - if (ret) 59 - goto end; 60 - 61 - /* Reset and set the falcon up */ 62 - ret = nvkm_falcon_reset(falcon); 63 - if (ret) 64 - goto end; 65 - nvkm_falcon_bind_context(falcon, gsb->inst); 66 - 67 - /* Load the HS bootloader into the falcon's IMEM/DMEM */ 68 - ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr); 69 - if (ret < 0) 70 - goto end; 71 - 72 - start_address = ret; 73 - 74 - /* Disable interrupts as we will poll for the HALT bit */ 75 - nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false); 76 - 77 - /* Set default error value in mailbox register */ 78 - nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); 79 - 80 - /* Start the HS bootloader */ 81 - nvkm_falcon_set_start_addr(falcon, start_address); 82 - nvkm_falcon_start(falcon); 83 - ret = nvkm_falcon_wait_for_halt(falcon, 100); 84 - if (ret) 85 - goto end; 86 - 87 - /* 88 - * The mailbox register contains the (positive) error code - return this 89 - * to the caller 90 - */ 91 - ret = nvkm_falcon_rd32(falcon, 0x040); 92 - 93 - end: 94 - /* Reenable interrupts */ 95 - 
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true); 96 - 97 - /* We don't need the ACR firmware anymore */ 98 - nvkm_vmm_put(gsb->vmm, &vma); 99 - nvkm_falcon_put(falcon, subdev); 100 - 101 - return ret; 102 - } 103 - 104 - int 105 - gm200_secboot_oneinit(struct nvkm_secboot *sb) 106 - { 107 - struct gm200_secboot *gsb = gm200_secboot(sb); 108 - struct nvkm_device *device = sb->subdev.device; 109 - int ret; 110 - 111 - /* Allocate instance block and VM */ 112 - ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true, 113 - &gsb->inst); 114 - if (ret) 115 - return ret; 116 - 117 - ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr", 118 - &gsb->vmm); 119 - if (ret) 120 - return ret; 121 - 122 - atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]); 123 - gsb->vmm->debug = gsb->base.subdev.debug; 124 - 125 - ret = nvkm_vmm_join(gsb->vmm, gsb->inst); 126 - if (ret) 127 - return ret; 128 - 129 - if (sb->acr->func->oneinit) { 130 - ret = sb->acr->func->oneinit(sb->acr, sb); 131 - if (ret) 132 - return ret; 133 - } 134 - 135 - return 0; 136 - } 137 - 138 - int 139 - gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) 140 - { 141 - int ret = 0; 142 - 143 - if (sb->acr->func->fini) 144 - ret = sb->acr->func->fini(sb->acr, sb, suspend); 145 - 146 - return ret; 147 - } 148 - 149 - void * 150 - gm200_secboot_dtor(struct nvkm_secboot *sb) 151 - { 152 - struct gm200_secboot *gsb = gm200_secboot(sb); 153 - 154 - sb->acr->func->dtor(sb->acr); 155 - 156 - nvkm_vmm_part(gsb->vmm, gsb->inst); 157 - nvkm_vmm_unref(&gsb->vmm); 158 - nvkm_memory_unref(&gsb->inst); 159 - 160 - return gsb; 161 - } 162 - 163 - 164 - static const struct nvkm_secboot_func 165 - gm200_secboot = { 166 - .dtor = gm200_secboot_dtor, 167 - .oneinit = gm200_secboot_oneinit, 168 - .fini = gm200_secboot_fini, 169 - .run_blob = gm200_secboot_run_blob, 170 - }; 171 - 172 - int 173 - gm200_secboot_new(struct nvkm_device *device, int index, 174 - struct nvkm_secboot **psb) 175 - { 
176 - int ret; 177 - struct gm200_secboot *gsb; 178 - struct nvkm_acr *acr; 179 - 180 - acr = acr_r361_new(BIT(NVKM_SECBOOT_FALCON_FECS) | 181 - BIT(NVKM_SECBOOT_FALCON_GPCCS)); 182 - if (IS_ERR(acr)) 183 - return PTR_ERR(acr); 184 - 185 - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); 186 - if (!gsb) { 187 - psb = NULL; 188 - return -ENOMEM; 189 - } 190 - *psb = &gsb->base; 191 - 192 - ret = nvkm_secboot_ctor(&gm200_secboot, acr, device, index, &gsb->base); 193 - if (ret) 194 - return ret; 195 - 196 - return 0; 197 - } 198 - 199 - 200 - MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin"); 201 - MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin"); 202 - MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin"); 203 - MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin"); 204 - MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin"); 205 - MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin"); 206 - MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin"); 207 - MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin"); 208 - MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin"); 209 - MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin"); 210 - MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin"); 211 - MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin"); 212 - MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin"); 213 - MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin"); 214 - MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin"); 215 - 216 - MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin"); 217 - MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin"); 218 - MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin"); 219 - MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin"); 220 - MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin"); 221 - MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin"); 222 - MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin"); 223 - MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin"); 224 - MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin"); 225 - MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin"); 226 - MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin"); 
227 - MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin"); 228 - MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin"); 229 - MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin"); 230 - MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin"); 231 - 232 - MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin"); 233 - MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin"); 234 - MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin"); 235 - MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin"); 236 - MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin"); 237 - MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin"); 238 - MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin"); 239 - MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin"); 240 - MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin"); 241 - MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin"); 242 - MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin"); 243 - MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); 244 - MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); 245 - MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); 246 - MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); 247 - 248 - MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); 249 - MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); 250 - MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); 251 - MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); 252 - MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); 253 - MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); 254 - MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); 255 - MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); 256 - MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); 257 - MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); 258 - MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); 259 - MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); 260 - MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); 261 - MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); 262 - MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
-46
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #ifndef __NVKM_SECBOOT_GM200_H__ 24 - #define __NVKM_SECBOOT_GM200_H__ 25 - 26 - #include "priv.h" 27 - 28 - struct gm200_secboot { 29 - struct nvkm_secboot base; 30 - 31 - /* Instance block & address space used for HS FW execution */ 32 - struct nvkm_memory *inst; 33 - struct nvkm_vmm *vmm; 34 - }; 35 - #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) 36 - 37 - int gm200_secboot_oneinit(struct nvkm_secboot *); 38 - int gm200_secboot_fini(struct nvkm_secboot *, bool); 39 - void *gm200_secboot_dtor(struct nvkm_secboot *); 40 - int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *, 41 - struct nvkm_falcon *); 42 - 43 - /* Tegra-only */ 44 - int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32); 45 - 46 - #endif
-148
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #include "acr.h" 24 - #include "gm200.h" 25 - 26 - #define TEGRA210_MC_BASE 0x70019000 27 - 28 - #ifdef CONFIG_ARCH_TEGRA 29 - #define MC_SECURITY_CARVEOUT2_CFG0 0xc58 30 - #define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c 31 - #define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60 32 - #define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64 33 - #define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1) 34 - /** 35 - * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra 36 - * 37 - * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region 38 - * is reserved from system memory by the bootloader and irreversibly locked. 39 - * This function reads the address and size of the pre-configured WPR region. 
40 - */ 41 - int 42 - gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) 43 - { 44 - struct nvkm_secboot *sb = &gsb->base; 45 - void __iomem *mc; 46 - u32 cfg; 47 - 48 - mc = ioremap(mc_base, 0xd00); 49 - if (!mc) { 50 - nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); 51 - return -ENOMEM; 52 - } 53 - sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | 54 - ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); 55 - sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) 56 - << 17; 57 - cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0); 58 - iounmap(mc); 59 - 60 - /* Check that WPR settings are valid */ 61 - if (sb->wpr_size == 0) { 62 - nvkm_error(&sb->subdev, "WPR region is empty\n"); 63 - return -EINVAL; 64 - } 65 - 66 - if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) { 67 - nvkm_error(&sb->subdev, "WPR region not locked\n"); 68 - return -EINVAL; 69 - } 70 - 71 - return 0; 72 - } 73 - #else 74 - int 75 - gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) 76 - { 77 - nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n"); 78 - return -EINVAL; 79 - } 80 - #endif 81 - 82 - static int 83 - gm20b_secboot_oneinit(struct nvkm_secboot *sb) 84 - { 85 - struct gm200_secboot *gsb = gm200_secboot(sb); 86 - int ret; 87 - 88 - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE); 89 - if (ret) 90 - return ret; 91 - 92 - return gm200_secboot_oneinit(sb); 93 - } 94 - 95 - static const struct nvkm_secboot_func 96 - gm20b_secboot = { 97 - .dtor = gm200_secboot_dtor, 98 - .oneinit = gm20b_secboot_oneinit, 99 - .fini = gm200_secboot_fini, 100 - .run_blob = gm200_secboot_run_blob, 101 - }; 102 - 103 - int 104 - gm20b_secboot_new(struct nvkm_device *device, int index, 105 - struct nvkm_secboot **psb) 106 - { 107 - int ret; 108 - struct gm200_secboot *gsb; 109 - struct nvkm_acr *acr; 110 - 111 - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | 112 - 
BIT(NVKM_SECBOOT_FALCON_PMU)); 113 - if (IS_ERR(acr)) 114 - return PTR_ERR(acr); 115 - /* Support the initial GM20B firmware release without PMU */ 116 - acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU); 117 - 118 - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); 119 - if (!gsb) { 120 - psb = NULL; 121 - return -ENOMEM; 122 - } 123 - *psb = &gsb->base; 124 - 125 - ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base); 126 - if (ret) 127 - return ret; 128 - 129 - return 0; 130 - } 131 - 132 - #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 133 - MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin"); 134 - MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin"); 135 - MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin"); 136 - MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin"); 137 - MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin"); 138 - MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin"); 139 - MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin"); 140 - MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin"); 141 - MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin"); 142 - MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin"); 143 - MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin"); 144 - MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin"); 145 - MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin"); 146 - MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin"); 147 - MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin"); 148 - #endif
-264
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #include "acr.h" 24 - #include "gm200.h" 25 - 26 - #include "ls_ucode.h" 27 - #include "hs_ucode.h" 28 - #include <subdev/mc.h> 29 - #include <subdev/timer.h> 30 - #include <engine/falcon.h> 31 - #include <engine/nvdec.h> 32 - 33 - static bool 34 - gp102_secboot_scrub_required(struct nvkm_secboot *sb) 35 - { 36 - struct nvkm_subdev *subdev = &sb->subdev; 37 - struct nvkm_device *device = subdev->device; 38 - u32 reg; 39 - 40 - nvkm_wr32(device, 0x100cd0, 0x2); 41 - reg = nvkm_rd32(device, 0x100cd0); 42 - 43 - return (reg & BIT(4)); 44 - } 45 - 46 - static int 47 - gp102_run_secure_scrub(struct nvkm_secboot *sb) 48 - { 49 - struct nvkm_subdev *subdev = &sb->subdev; 50 - struct nvkm_device *device = subdev->device; 51 - struct nvkm_engine *engine; 52 - struct nvkm_falcon *falcon; 53 - void *scrub_image; 54 - struct fw_bin_header *hsbin_hdr; 55 - struct hsf_fw_header *fw_hdr; 56 - struct hsf_load_header *lhdr; 57 - void *scrub_data; 58 - int ret; 59 - 60 - nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n"); 61 - 62 - engine = nvkm_engine_ref(&device->nvdec[0]->engine); 63 - if (IS_ERR(engine)) 64 - return PTR_ERR(engine); 65 - falcon = device->nvdec[0]->falcon; 66 - 67 - nvkm_falcon_get(falcon, &sb->subdev); 68 - 69 - scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber"); 70 - if (IS_ERR(scrub_image)) 71 - return PTR_ERR(scrub_image); 72 - 73 - nvkm_falcon_reset(falcon); 74 - nvkm_falcon_bind_context(falcon, NULL); 75 - 76 - hsbin_hdr = scrub_image; 77 - fw_hdr = scrub_image + hsbin_hdr->header_offset; 78 - lhdr = scrub_image + fw_hdr->hdr_offset; 79 - scrub_data = scrub_image + hsbin_hdr->data_offset; 80 - 81 - nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off, 82 - lhdr->non_sec_code_size, 83 - lhdr->non_sec_code_off >> 8, 0, false); 84 - nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0], 85 - ALIGN(lhdr->apps[0], 0x100), 86 - lhdr->apps[1], 87 - lhdr->apps[0] >> 8, 0, true); 88 - 
nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0, 89 - lhdr->data_size, 0); 90 - 91 - kfree(scrub_image); 92 - 93 - nvkm_falcon_set_start_addr(falcon, 0x0); 94 - nvkm_falcon_start(falcon); 95 - 96 - ret = nvkm_falcon_wait_for_halt(falcon, 500); 97 - if (ret < 0) { 98 - nvkm_error(subdev, "failed to run VPR scrubber binary!\n"); 99 - ret = -ETIMEDOUT; 100 - goto end; 101 - } 102 - 103 - /* put nvdec in clean state - without reset it will remain in HS mode */ 104 - nvkm_falcon_reset(falcon); 105 - 106 - if (gp102_secboot_scrub_required(sb)) { 107 - nvkm_error(subdev, "VPR scrubber binary failed!\n"); 108 - ret = -EINVAL; 109 - goto end; 110 - } 111 - 112 - nvkm_debug(subdev, "VPR scrub successfully completed\n"); 113 - 114 - end: 115 - nvkm_falcon_put(falcon, &sb->subdev); 116 - nvkm_engine_unref(&engine); 117 - return ret; 118 - } 119 - 120 - static int 121 - gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, 122 - struct nvkm_falcon *falcon) 123 - { 124 - int ret; 125 - 126 - /* make sure the VPR region is unlocked */ 127 - if (gp102_secboot_scrub_required(sb)) { 128 - ret = gp102_run_secure_scrub(sb); 129 - if (ret) 130 - return ret; 131 - } 132 - 133 - return gm200_secboot_run_blob(sb, blob, falcon); 134 - } 135 - 136 - const struct nvkm_secboot_func 137 - gp102_secboot = { 138 - .dtor = gm200_secboot_dtor, 139 - .oneinit = gm200_secboot_oneinit, 140 - .fini = gm200_secboot_fini, 141 - .run_blob = gp102_secboot_run_blob, 142 - }; 143 - 144 - int 145 - gp102_secboot_new(struct nvkm_device *device, int index, 146 - struct nvkm_secboot **psb) 147 - { 148 - int ret; 149 - struct gm200_secboot *gsb; 150 - struct nvkm_acr *acr; 151 - 152 - acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2, 153 - BIT(NVKM_SECBOOT_FALCON_FECS) | 154 - BIT(NVKM_SECBOOT_FALCON_GPCCS) | 155 - BIT(NVKM_SECBOOT_FALCON_SEC2)); 156 - if (IS_ERR(acr)) 157 - return PTR_ERR(acr); 158 - 159 - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); 160 - if (!gsb) { 161 - psb = 
NULL; 162 - return -ENOMEM; 163 - } 164 - *psb = &gsb->base; 165 - 166 - ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); 167 - if (ret) 168 - return ret; 169 - 170 - return 0; 171 - } 172 - 173 - MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin"); 174 - MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin"); 175 - MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin"); 176 - MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin"); 177 - MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin"); 178 - MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin"); 179 - MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin"); 180 - MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin"); 181 - MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin"); 182 - MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin"); 183 - MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin"); 184 - MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin"); 185 - MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin"); 186 - MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin"); 187 - MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin"); 188 - MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin"); 189 - MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); 190 - MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin"); 191 - MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin"); 192 - MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin"); 193 - MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin"); 194 - MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin"); 195 - MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin"); 196 - MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin"); 197 - MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin"); 198 - MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin"); 199 - MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin"); 200 - MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin"); 201 - MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin"); 202 - MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin"); 203 - MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin"); 204 - MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin"); 
205 - MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin"); 206 - MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin"); 207 - MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin"); 208 - MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin"); 209 - MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin"); 210 - MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin"); 211 - MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin"); 212 - MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin"); 213 - MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin"); 214 - MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin"); 215 - MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin"); 216 - MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin"); 217 - MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin"); 218 - MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin"); 219 - MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin"); 220 - MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin"); 221 - MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin"); 222 - MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin"); 223 - MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin"); 224 - MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin"); 225 - MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin"); 226 - MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin"); 227 - MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin"); 228 - MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin"); 229 - MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin"); 230 - MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin"); 231 - MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin"); 232 - MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin"); 233 - MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin"); 234 - MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin"); 235 - MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin"); 236 - MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin"); 237 - MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin"); 238 - MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin"); 239 - MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin"); 240 - 
MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin"); 241 - MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin"); 242 - MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin"); 243 - MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin"); 244 - MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin"); 245 - MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin"); 246 - MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin"); 247 - MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin"); 248 - MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin"); 249 - MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin"); 250 - MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin"); 251 - MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin"); 252 - MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin"); 253 - MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin"); 254 - MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin"); 255 - MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin"); 256 - MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin"); 257 - MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin"); 258 - MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin"); 259 - MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin"); 260 - MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin"); 261 - MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin"); 262 - MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin"); 263 - MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin"); 264 - MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
-88
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
··· 1 - /* 2 - * Copyright 2017 Red Hat Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 
21 - */ 22 - #include "gm200.h" 23 - #include "acr.h" 24 - 25 - int 26 - gp108_secboot_new(struct nvkm_device *device, int index, 27 - struct nvkm_secboot **psb) 28 - { 29 - struct gm200_secboot *gsb; 30 - struct nvkm_acr *acr; 31 - 32 - acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2, 33 - BIT(NVKM_SECBOOT_FALCON_FECS) | 34 - BIT(NVKM_SECBOOT_FALCON_GPCCS) | 35 - BIT(NVKM_SECBOOT_FALCON_SEC2)); 36 - if (IS_ERR(acr)) 37 - return PTR_ERR(acr); 38 - 39 - if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) { 40 - acr->func->dtor(acr); 41 - return -ENOMEM; 42 - } 43 - *psb = &gsb->base; 44 - 45 - return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base); 46 - } 47 - 48 - MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin"); 49 - MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin"); 50 - MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin"); 51 - MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin"); 52 - MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin"); 53 - MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin"); 54 - MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin"); 55 - MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin"); 56 - MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin"); 57 - MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin"); 58 - MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin"); 59 - MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin"); 60 - MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin"); 61 - MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin"); 62 - MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin"); 63 - MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin"); 64 - MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin"); 65 - MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin"); 66 - MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin"); 67 - MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin"); 68 - 69 - MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin"); 70 - MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin"); 71 - MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin"); 72 - 
MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin"); 73 - MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin"); 74 - MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin"); 75 - MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin"); 76 - MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin"); 77 - MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin"); 78 - MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin"); 79 - MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin"); 80 - MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin"); 81 - MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin"); 82 - MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin"); 83 - MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin"); 84 - MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin"); 85 - MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); 86 - MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin"); 87 - MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin"); 88 - MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
-95
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #include "acr.h" 24 - #include "gm200.h" 25 - 26 - #define TEGRA186_MC_BASE 0x02c10000 27 - 28 - static int 29 - gp10b_secboot_oneinit(struct nvkm_secboot *sb) 30 - { 31 - struct gm200_secboot *gsb = gm200_secboot(sb); 32 - int ret; 33 - 34 - ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE); 35 - if (ret) 36 - return ret; 37 - 38 - return gm200_secboot_oneinit(sb); 39 - } 40 - 41 - static const struct nvkm_secboot_func 42 - gp10b_secboot = { 43 - .dtor = gm200_secboot_dtor, 44 - .oneinit = gp10b_secboot_oneinit, 45 - .fini = gm200_secboot_fini, 46 - .run_blob = gm200_secboot_run_blob, 47 - }; 48 - 49 - int 50 - gp10b_secboot_new(struct nvkm_device *device, int index, 51 - struct nvkm_secboot **psb) 52 - { 53 - int ret; 54 - struct gm200_secboot *gsb; 55 - struct nvkm_acr *acr; 56 - 57 - acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | 58 - BIT(NVKM_SECBOOT_FALCON_GPCCS) | 59 - BIT(NVKM_SECBOOT_FALCON_PMU)); 60 - if (IS_ERR(acr)) 61 - return PTR_ERR(acr); 62 - 63 - gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); 64 - if (!gsb) { 65 - psb = NULL; 66 - return -ENOMEM; 67 - } 68 - *psb = &gsb->base; 69 - 70 - ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base); 71 - if (ret) 72 - return ret; 73 - 74 - return 0; 75 - } 76 - 77 - #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) 78 - MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); 79 - MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); 80 - MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); 81 - MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); 82 - MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); 83 - MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); 84 - MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); 85 - MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); 86 - MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); 87 - MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); 88 - MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); 89 - MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); 90 - 
MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); 91 - MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); 92 - MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); 93 - MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); 94 - MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); 95 - #endif
-97
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - #include "hs_ucode.h" 24 - #include "ls_ucode.h" 25 - #include "acr.h" 26 - 27 - #include <engine/falcon.h> 28 - 29 - /** 30 - * hs_ucode_patch_signature() - patch HS blob with correct signature for 31 - * specified falcon. 32 - */ 33 - static void 34 - hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image, 35 - bool new_format) 36 - { 37 - struct fw_bin_header *hsbin_hdr = acr_image; 38 - struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset; 39 - void *hs_data = acr_image + hsbin_hdr->data_offset; 40 - void *sig; 41 - u32 sig_size; 42 - u32 patch_loc, patch_sig; 43 - 44 - /* 45 - * I had the brilliant idea to "improve" the binary format by 46 - * removing this useless indirection. 
However to make NVIDIA files 47 - * directly compatible, let's support both format. 48 - */ 49 - if (new_format) { 50 - patch_loc = fw_hdr->patch_loc; 51 - patch_sig = fw_hdr->patch_sig; 52 - } else { 53 - patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc); 54 - patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig); 55 - } 56 - 57 - /* Falcon in debug or production mode? */ 58 - if (falcon->debug) { 59 - sig = acr_image + fw_hdr->sig_dbg_offset; 60 - sig_size = fw_hdr->sig_dbg_size; 61 - } else { 62 - sig = acr_image + fw_hdr->sig_prod_offset; 63 - sig_size = fw_hdr->sig_prod_size; 64 - } 65 - 66 - /* Patch signature */ 67 - memcpy(hs_data + patch_loc, sig + patch_sig, sig_size); 68 - } 69 - 70 - void * 71 - hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon, 72 - const char *fw) 73 - { 74 - void *acr_image; 75 - bool new_format; 76 - 77 - acr_image = nvkm_acr_load_firmware(subdev, fw, 0); 78 - if (IS_ERR(acr_image)) 79 - return acr_image; 80 - 81 - /* detect the format to define how signature should be patched */ 82 - switch (((u32 *)acr_image)[0]) { 83 - case 0x3b1d14f0: 84 - new_format = true; 85 - break; 86 - case 0x000010de: 87 - new_format = false; 88 - break; 89 - default: 90 - nvkm_error(subdev, "unknown header for HS blob %s\n", fw); 91 - return ERR_PTR(-EINVAL); 92 - } 93 - 94 - hs_ucode_patch_signature(falcon, acr_image, new_format); 95 - 96 - return acr_image; 97 - }
-81
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
··· 1 - /* 2 - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #ifndef __NVKM_SECBOOT_HS_UCODE_H__ 24 - #define __NVKM_SECBOOT_HS_UCODE_H__ 25 - 26 - #include <core/os.h> 27 - #include <core/subdev.h> 28 - 29 - struct nvkm_falcon; 30 - 31 - /** 32 - * struct hsf_fw_header - HS firmware descriptor 33 - * @sig_dbg_offset: offset of the debug signature 34 - * @sig_dbg_size: size of the debug signature 35 - * @sig_prod_offset: offset of the production signature 36 - * @sig_prod_size: size of the production signature 37 - * @patch_loc: offset of the offset (sic) of where the signature is 38 - * @patch_sig: offset of the offset (sic) to add to sig_*_offset 39 - * @hdr_offset: offset of the load header (see struct hs_load_header) 40 - * @hdr_size: size of above header 41 - * 42 - * This structure is embedded in the HS firmware image at 43 - * hs_bin_hdr.header_offset. 44 - */ 45 - struct hsf_fw_header { 46 - u32 sig_dbg_offset; 47 - u32 sig_dbg_size; 48 - u32 sig_prod_offset; 49 - u32 sig_prod_size; 50 - u32 patch_loc; 51 - u32 patch_sig; 52 - u32 hdr_offset; 53 - u32 hdr_size; 54 - }; 55 - 56 - /** 57 - * struct hsf_load_header - HS firmware load header 58 - */ 59 - struct hsf_load_header { 60 - u32 non_sec_code_off; 61 - u32 non_sec_code_size; 62 - u32 data_dma_base; 63 - u32 data_size; 64 - u32 num_apps; 65 - /* 66 - * Organized as follows: 67 - * - app0_code_off 68 - * - app1_code_off 69 - * - ... 70 - * - appn_code_off 71 - * - app0_code_size 72 - * - app1_code_size 73 - * - ... 74 - */ 75 - u32 apps[0]; 76 - }; 77 - 78 - void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *, 79 - const char *); 80 - 81 - #endif
-161
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
··· 1 - /* 2 - * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #ifndef __NVKM_SECBOOT_LS_UCODE_H__ 24 - #define __NVKM_SECBOOT_LS_UCODE_H__ 25 - 26 - #include <core/os.h> 27 - #include <core/subdev.h> 28 - #include <subdev/secboot.h> 29 - 30 - struct nvkm_acr; 31 - 32 - /** 33 - * struct ls_ucode_img_desc - descriptor of firmware image 34 - * @descriptor_size: size of this descriptor 35 - * @image_size: size of the whole image 36 - * @bootloader_start_offset: start offset of the bootloader in ucode image 37 - * @bootloader_size: size of the bootloader 38 - * @bootloader_imem_offset: start off set of the bootloader in IMEM 39 - * @bootloader_entry_point: entry point of the bootloader in IMEM 40 - * @app_start_offset: start offset of the LS firmware 41 - * @app_size: size of the LS firmware's code and data 42 - * @app_imem_offset: offset of the app in IMEM 43 - * @app_imem_entry: entry point of the app in IMEM 44 - * @app_dmem_offset: offset of the data in DMEM 45 - * @app_resident_code_offset: offset of app code from app_start_offset 46 - * @app_resident_code_size: size of the code 47 - * @app_resident_data_offset: offset of data from app_start_offset 48 - * @app_resident_data_size: size of data 49 - * 50 - * A firmware image contains the code, data, and bootloader of a given LS 51 - * falcon in a single blob. This structure describes where everything is. 52 - * 53 - * This can be generated from a (bootloader, code, data) set if they have 54 - * been loaded separately, or come directly from a file. 
55 - */ 56 - struct ls_ucode_img_desc { 57 - u32 descriptor_size; 58 - u32 image_size; 59 - u32 tools_version; 60 - u32 app_version; 61 - char date[64]; 62 - u32 bootloader_start_offset; 63 - u32 bootloader_size; 64 - u32 bootloader_imem_offset; 65 - u32 bootloader_entry_point; 66 - u32 app_start_offset; 67 - u32 app_size; 68 - u32 app_imem_offset; 69 - u32 app_imem_entry; 70 - u32 app_dmem_offset; 71 - u32 app_resident_code_offset; 72 - u32 app_resident_code_size; 73 - u32 app_resident_data_offset; 74 - u32 app_resident_data_size; 75 - u32 nb_overlays; 76 - struct {u32 start; u32 size; } load_ovl[64]; 77 - u32 compressed; 78 - }; 79 - 80 - /** 81 - * struct ls_ucode_img - temporary storage for loaded LS firmwares 82 - * @node: to link within lsf_ucode_mgr 83 - * @falcon_id: ID of the falcon this LS firmware is for 84 - * @ucode_desc: loaded or generated map of ucode_data 85 - * @ucode_data: firmware payload (code and data) 86 - * @ucode_size: size in bytes of data in ucode_data 87 - * @ucode_off: offset of the ucode in ucode_data 88 - * @sig: signature for this firmware 89 - * @sig:size: size of the signature in bytes 90 - * 91 - * Preparing the WPR LS blob requires information about all the LS firmwares 92 - * (size, etc) to be known. This structure contains all the data of one LS 93 - * firmware. 
94 - */ 95 - struct ls_ucode_img { 96 - struct list_head node; 97 - enum nvkm_secboot_falcon falcon_id; 98 - 99 - struct ls_ucode_img_desc ucode_desc; 100 - u8 *ucode_data; 101 - u32 ucode_size; 102 - u32 ucode_off; 103 - 104 - u8 *sig; 105 - u32 sig_size; 106 - }; 107 - 108 - /** 109 - * struct fw_bin_header - header of firmware files 110 - * @bin_magic: always 0x3b1d14f0 111 - * @bin_ver: version of the bin format 112 - * @bin_size: entire image size including this header 113 - * @header_offset: offset of the firmware/bootloader header in the file 114 - * @data_offset: offset of the firmware/bootloader payload in the file 115 - * @data_size: size of the payload 116 - * 117 - * This header is located at the beginning of the HS firmware and HS bootloader 118 - * files, to describe where the headers and data can be found. 119 - */ 120 - struct fw_bin_header { 121 - u32 bin_magic; 122 - u32 bin_ver; 123 - u32 bin_size; 124 - u32 header_offset; 125 - u32 data_offset; 126 - u32 data_size; 127 - }; 128 - 129 - /** 130 - * struct fw_bl_desc - firmware bootloader descriptor 131 - * @start_tag: starting tag of bootloader 132 - * @desc_dmem_load_off: DMEM offset of flcn_bl_dmem_desc 133 - * @code_off: offset of code section 134 - * @code_size: size of code section 135 - * @data_off: offset of data section 136 - * @data_size: size of data section 137 - * 138 - * This structure is embedded in bootloader firmware files at to describe the 139 - * IMEM and DMEM layout expected by the bootloader. 
140 - */ 141 - struct fw_bl_desc { 142 - u32 start_tag; 143 - u32 dmem_load_off; 144 - u32 code_off; 145 - u32 code_size; 146 - u32 data_off; 147 - u32 data_size; 148 - }; 149 - 150 - int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int, 151 - struct ls_ucode_img *); 152 - int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int, 153 - struct ls_ucode_img *); 154 - int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int, 155 - struct ls_ucode_img *); 156 - int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); 157 - int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int, 158 - struct ls_ucode_img *); 159 - int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); 160 - 161 - #endif
-160
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - 24 - #include "ls_ucode.h" 25 - #include "acr.h" 26 - 27 - #include <core/firmware.h> 28 - 29 - #define BL_DESC_BLK_SIZE 256 30 - /** 31 - * Build a ucode image and descriptor from provided bootloader, code and data. 32 - * 33 - * @bl: bootloader image, including 16-bytes descriptor 34 - * @code: LS firmware code segment 35 - * @data: LS firmware data segment 36 - * @desc: ucode descriptor to be written 37 - * 38 - * Return: allocated ucode image with corresponding descriptor information. desc 39 - * is also updated to contain the right offsets within returned image. 
40 - */ 41 - static void * 42 - ls_ucode_img_build(const struct firmware *bl, const struct firmware *code, 43 - const struct firmware *data, struct ls_ucode_img_desc *desc) 44 - { 45 - struct fw_bin_header *bin_hdr = (void *)bl->data; 46 - struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset; 47 - void *bl_data = (void *)bl->data + bin_hdr->data_offset; 48 - u32 pos = 0; 49 - void *image; 50 - 51 - desc->bootloader_start_offset = pos; 52 - desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32)); 53 - desc->bootloader_imem_offset = bl_desc->start_tag * 256; 54 - desc->bootloader_entry_point = bl_desc->start_tag * 256; 55 - 56 - pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE); 57 - desc->app_start_offset = pos; 58 - desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) + 59 - ALIGN(data->size, BL_DESC_BLK_SIZE); 60 - desc->app_imem_offset = 0; 61 - desc->app_imem_entry = 0; 62 - desc->app_dmem_offset = 0; 63 - desc->app_resident_code_offset = 0; 64 - desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE); 65 - 66 - pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE); 67 - desc->app_resident_data_offset = pos - desc->app_start_offset; 68 - desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE); 69 - 70 - desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) + 71 - desc->app_size; 72 - 73 - image = kzalloc(desc->image_size, GFP_KERNEL); 74 - if (!image) 75 - return ERR_PTR(-ENOMEM); 76 - 77 - memcpy(image + desc->bootloader_start_offset, bl_data, 78 - bl_desc->code_size); 79 - memcpy(image + desc->app_start_offset, code->data, code->size); 80 - memcpy(image + desc->app_start_offset + desc->app_resident_data_offset, 81 - data->data, data->size); 82 - 83 - return image; 84 - } 85 - 86 - /** 87 - * ls_ucode_img_load_gr() - load and prepare a LS GR ucode image 88 - * 89 - * Load the LS microcode, bootloader and signature and pack them into a single 90 - * blob. 
Also generate the corresponding ucode descriptor. 91 - */ 92 - static int 93 - ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver, 94 - struct ls_ucode_img *img, const char *falcon_name) 95 - { 96 - const struct firmware *bl, *code, *data, *sig; 97 - char f[64]; 98 - int ret; 99 - 100 - snprintf(f, sizeof(f), "gr/%s_bl", falcon_name); 101 - ret = nvkm_firmware_get(subdev, f, &bl); 102 - if (ret) 103 - goto error; 104 - 105 - snprintf(f, sizeof(f), "gr/%s_inst", falcon_name); 106 - ret = nvkm_firmware_get(subdev, f, &code); 107 - if (ret) 108 - goto free_bl; 109 - 110 - snprintf(f, sizeof(f), "gr/%s_data", falcon_name); 111 - ret = nvkm_firmware_get(subdev, f, &data); 112 - if (ret) 113 - goto free_inst; 114 - 115 - snprintf(f, sizeof(f), "gr/%s_sig", falcon_name); 116 - ret = nvkm_firmware_get(subdev, f, &sig); 117 - if (ret) 118 - goto free_data; 119 - 120 - img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); 121 - if (!img->sig) { 122 - ret = -ENOMEM; 123 - goto free_sig; 124 - } 125 - img->sig_size = sig->size; 126 - 127 - img->ucode_data = ls_ucode_img_build(bl, code, data, 128 - &img->ucode_desc); 129 - if (IS_ERR(img->ucode_data)) { 130 - kfree(img->sig); 131 - ret = PTR_ERR(img->ucode_data); 132 - goto free_sig; 133 - } 134 - img->ucode_size = img->ucode_desc.image_size; 135 - 136 - free_sig: 137 - nvkm_firmware_put(sig); 138 - free_data: 139 - nvkm_firmware_put(data); 140 - free_inst: 141 - nvkm_firmware_put(code); 142 - free_bl: 143 - nvkm_firmware_put(bl); 144 - error: 145 - return ret; 146 - } 147 - 148 - int 149 - acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver, 150 - struct ls_ucode_img *img) 151 - { 152 - return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs"); 153 - } 154 - 155 - int 156 - acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver, 157 - struct ls_ucode_img *img) 158 - { 159 - return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs"); 160 - }
-177
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
··· 1 - /* 2 - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - 24 - #include "ls_ucode.h" 25 - #include "acr.h" 26 - 27 - #include <core/firmware.h> 28 - #include <core/msgqueue.h> 29 - #include <subdev/pmu.h> 30 - #include <engine/sec2.h> 31 - #include <subdev/mc.h> 32 - #include <subdev/timer.h> 33 - 34 - /** 35 - * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw 36 - * 37 - * Load the LS microcode, desc and signature and pack them into a single 38 - * blob. 
39 - */ 40 - static int 41 - acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name, 42 - int maxver, struct ls_ucode_img *img) 43 - { 44 - const struct firmware *image, *desc, *sig; 45 - char f[64]; 46 - int ver, ret; 47 - 48 - snprintf(f, sizeof(f), "%s/image", name); 49 - ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image); 50 - if (ver < 0) 51 - return ver; 52 - img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL); 53 - nvkm_firmware_put(image); 54 - if (!img->ucode_data) 55 - return -ENOMEM; 56 - 57 - snprintf(f, sizeof(f), "%s/desc", name); 58 - ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc); 59 - if (ret < 0) 60 - return ret; 61 - memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc)); 62 - img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256); 63 - nvkm_firmware_put(desc); 64 - 65 - snprintf(f, sizeof(f), "%s/sig", name); 66 - ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig); 67 - if (ret < 0) 68 - return ret; 69 - img->sig_size = sig->size; 70 - img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); 71 - nvkm_firmware_put(sig); 72 - if (!img->sig) 73 - return -ENOMEM; 74 - 75 - return ver; 76 - } 77 - 78 - static int 79 - acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue, 80 - struct nvkm_falcon *falcon, u32 addr_args) 81 - { 82 - struct nvkm_device *device = falcon->owner->device; 83 - u8 buf[NVKM_MSGQUEUE_CMDLINE_SIZE]; 84 - 85 - memset(buf, 0, sizeof(buf)); 86 - nvkm_msgqueue_write_cmdline(queue, buf); 87 - nvkm_falcon_load_dmem(falcon, buf, addr_args, sizeof(buf), 0); 88 - /* rearm the queue so it will wait for the init message */ 89 - nvkm_msgqueue_reinit(queue); 90 - 91 - /* Enable interrupts */ 92 - nvkm_falcon_wr32(falcon, 0x10, 0xff); 93 - nvkm_mc_intr_mask(device, falcon->owner->index, true); 94 - 95 - /* Start LS firmware on boot falcon */ 96 - nvkm_falcon_start(falcon); 97 - 98 - return 0; 99 - } 100 - 101 - int 102 - 
acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver, 103 - struct ls_ucode_img *img) 104 - { 105 - struct nvkm_pmu *pmu = sb->subdev.device->pmu; 106 - int ret; 107 - 108 - ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img); 109 - if (ret) 110 - return ret; 111 - 112 - /* Allocate the PMU queue corresponding to the FW version */ 113 - ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon, 114 - sb, &pmu->queue); 115 - if (ret) 116 - return ret; 117 - 118 - return 0; 119 - } 120 - 121 - int 122 - acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) 123 - { 124 - struct nvkm_device *device = sb->subdev.device; 125 - struct nvkm_pmu *pmu = device->pmu; 126 - u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE; 127 - int ret; 128 - 129 - ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args); 130 - if (ret) 131 - return ret; 132 - 133 - nvkm_debug(&sb->subdev, "%s started\n", 134 - nvkm_secboot_falcon_name[acr->boot_falcon]); 135 - 136 - return 0; 137 - } 138 - 139 - int 140 - acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver, 141 - struct ls_ucode_img *img) 142 - { 143 - struct nvkm_sec2 *sec = sb->subdev.device->sec2; 144 - int ver, ret; 145 - 146 - ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img); 147 - if (ver < 0) 148 - return ver; 149 - 150 - /* Allocate the PMU queue corresponding to the FW version */ 151 - ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon, 152 - sb, &sec->queue); 153 - if (ret) 154 - return ret; 155 - 156 - return ver; 157 - } 158 - 159 - int 160 - acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) 161 - { 162 - const struct nvkm_subdev *subdev = &sb->subdev; 163 - struct nvkm_device *device = subdev->device; 164 - struct nvkm_sec2 *sec = device->sec2; 165 - /* on SEC arguments are always at the beginning of EMEM */ 166 - const u32 addr_args = 0x01000000; 167 - int ret; 168 - 
169 - ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args); 170 - if (ret) 171 - return ret; 172 - 173 - nvkm_debug(&sb->subdev, "%s started\n", 174 - nvkm_secboot_falcon_name[acr->boot_falcon]); 175 - 176 - return 0; 177 - }
-65
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
··· 1 - /* 2 - * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 - * DEALINGS IN THE SOFTWARE. 
21 - */ 22 - 23 - #ifndef __NVKM_SECBOOT_PRIV_H__ 24 - #define __NVKM_SECBOOT_PRIV_H__ 25 - 26 - #include <subdev/secboot.h> 27 - #include <subdev/mmu.h> 28 - struct nvkm_gpuobj; 29 - 30 - struct nvkm_secboot_func { 31 - int (*oneinit)(struct nvkm_secboot *); 32 - int (*fini)(struct nvkm_secboot *, bool suspend); 33 - void *(*dtor)(struct nvkm_secboot *); 34 - int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *, 35 - struct nvkm_falcon *); 36 - }; 37 - 38 - int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *, 39 - struct nvkm_device *, int, struct nvkm_secboot *); 40 - int nvkm_secboot_falcon_reset(struct nvkm_secboot *); 41 - int nvkm_secboot_falcon_run(struct nvkm_secboot *); 42 - 43 - extern const struct nvkm_secboot_func gp102_secboot; 44 - 45 - struct flcn_u64 { 46 - u32 lo; 47 - u32 hi; 48 - }; 49 - 50 - static inline u64 flcn64_to_u64(const struct flcn_u64 f) 51 - { 52 - return ((u64)f.hi) << 32 | f.lo; 53 - } 54 - 55 - static inline struct flcn_u64 u64_to_flcn64(u64 u) 56 - { 57 - struct flcn_u64 ret; 58 - 59 - ret.hi = upper_32_bits(u); 60 - ret.lo = lower_32_bits(u); 61 - 62 - return ret; 63 - } 64 - 65 - #endif