Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nouveau/disp: introduce acquire/release display path methods

These exist to give NVKM information on the set of display paths that
the DD needs to be active at any given time.

Previously, the supervisor attempted to determine this solely from OR
state, but there are a few configurations where this information on its
own isn't enough to determine the specific display paths in question:

- ANX9805, where the PIOR protocol for both DP and TMDS is TMDS.
- On a device using DCB Switched Outputs.
- On GM20x and newer, with a crossbar between the SOR and macro links.

After this commit, the DD tells NVKM *exactly* which display path it's
attempting a modeset on.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

+287 -40
+9
drivers/gpu/drm/nouveau/include/nvif/cl5070.h
··· 27 27 28 28 struct nv50_disp_mthd_v1 { 29 29 __u8 version; 30 + #define NV50_DISP_MTHD_V1_ACQUIRE 0x01 31 + #define NV50_DISP_MTHD_V1_RELEASE 0x02 30 32 #define NV50_DISP_MTHD_V1_DAC_LOAD 0x11 31 33 #define NV50_DISP_MTHD_V1_SOR_HDA_ELD 0x21 32 34 #define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22 ··· 39 37 __u16 hasht; 40 38 __u16 hashm; 41 39 __u8 pad06[2]; 40 + }; 41 + 42 + struct nv50_disp_acquire_v0 { 43 + __u8 version; 44 + __u8 or; 45 + __u8 link; 46 + __u8 pad03[5]; 42 47 }; 43 48 44 49 struct nv50_disp_dac_load_v0 {
+2 -1
drivers/gpu/drm/nouveau/nouveau_bios.c
··· 1533 1533 if (conf & 0x100000) 1534 1534 entry->i2c_upper_default = true; 1535 1535 1536 - entry->hasht = (entry->location << 4) | entry->type; 1536 + entry->hasht = (entry->extdev << 8) | (entry->location << 4) | 1537 + entry->type; 1537 1538 entry->hashm = (entry->heads << 8) | (link << 6) | entry->or; 1538 1539 return true; 1539 1540 }
+1
drivers/gpu/drm/nouveau/nouveau_encoder.h
··· 42 42 43 43 struct dcb_output *dcb; 44 44 int or; 45 + int link; 45 46 46 47 struct i2c_adapter *i2c; 47 48 struct nvkm_i2c_aux *aux;
+69 -6
drivers/gpu/drm/nouveau/nv50_display.c
··· 2403 2403 /****************************************************************************** 2404 2404 * Output path helpers 2405 2405 *****************************************************************************/ 2406 + static void 2407 + nv50_outp_release(struct nouveau_encoder *nv_encoder) 2408 + { 2409 + struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev); 2410 + struct { 2411 + struct nv50_disp_mthd_v1 base; 2412 + } args = { 2413 + .base.version = 1, 2414 + .base.method = NV50_DISP_MTHD_V1_RELEASE, 2415 + .base.hasht = nv_encoder->dcb->hasht, 2416 + .base.hashm = nv_encoder->dcb->hashm, 2417 + }; 2418 + 2419 + nvif_mthd(disp->disp, 0, &args, sizeof(args)); 2420 + nv_encoder->or = -1; 2421 + nv_encoder->link = 0; 2422 + } 2423 + 2424 + static int 2425 + nv50_outp_acquire(struct nouveau_encoder *nv_encoder) 2426 + { 2427 + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 2428 + struct nv50_disp *disp = nv50_disp(drm->dev); 2429 + struct { 2430 + struct nv50_disp_mthd_v1 base; 2431 + struct nv50_disp_acquire_v0 info; 2432 + } args = { 2433 + .base.version = 1, 2434 + .base.method = NV50_DISP_MTHD_V1_ACQUIRE, 2435 + .base.hasht = nv_encoder->dcb->hasht, 2436 + .base.hashm = nv_encoder->dcb->hashm, 2437 + }; 2438 + int ret; 2439 + 2440 + ret = nvif_mthd(disp->disp, 0, &args, sizeof(args)); 2441 + if (ret) { 2442 + NV_ERROR(drm, "error acquiring output path: %d\n", ret); 2443 + return ret; 2444 + } 2445 + 2446 + nv_encoder->or = args.info.or; 2447 + nv_encoder->link = args.info.link; 2448 + return 0; 2449 + } 2450 + 2406 2451 static int 2407 2452 nv50_outp_atomic_check_view(struct drm_encoder *encoder, 2408 2453 struct drm_crtc_state *crtc_state, ··· 2527 2482 } 2528 2483 2529 2484 nv_encoder->crtc = NULL; 2485 + nv50_outp_release(nv_encoder); 2530 2486 } 2531 2487 2532 2488 static void ··· 2538 2492 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 2539 2493 struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode; 
2540 2494 u32 *push; 2495 + 2496 + nv50_outp_acquire(nv_encoder); 2541 2497 2542 2498 push = evo_wait(mast, 8); 2543 2499 if (push) { ··· 2640 2592 if (!nv_encoder) 2641 2593 return -ENOMEM; 2642 2594 nv_encoder->dcb = dcbe; 2643 - nv_encoder->or = ffs(dcbe->or) - 1; 2644 2595 2645 2596 bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index); 2646 2597 if (bus) ··· 2806 2759 struct nv50_msto *msto[4]; 2807 2760 2808 2761 bool modified; 2762 + bool disabled; 2763 + int links; 2809 2764 }; 2810 2765 2811 2766 struct nv50_mstc { ··· 2956 2907 r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots); 2957 2908 WARN_ON(!r); 2958 2909 2959 - if (mstm->outp->dcb->sorconf.link & 1) 2910 + if (!mstm->links++) 2911 + nv50_outp_acquire(mstm->outp); 2912 + 2913 + if (mstm->outp->link & 1) 2960 2914 proto = 0x8; 2961 2915 else 2962 2916 proto = 0x9; ··· 2991 2939 2992 2940 mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0); 2993 2941 mstm->modified = true; 2942 + if (!--mstm->links) 2943 + mstm->disabled = true; 2994 2944 msto->disabled = true; 2995 2945 } 2996 2946 ··· 3207 3153 if (mstc && mstc->mstm == mstm) 3208 3154 nv50_msto_prepare(msto); 3209 3155 } 3156 + } 3157 + 3158 + if (mstm->disabled) { 3159 + if (!mstm->links) 3160 + nv50_outp_release(mstm->outp); 3161 + mstm->disabled = false; 3210 3162 } 3211 3163 } 3212 3164 ··· 3512 3452 nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0); 3513 3453 nv50_audio_disable(encoder, nv_crtc); 3514 3454 nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc); 3455 + nv50_outp_release(nv_encoder); 3515 3456 } 3516 3457 } 3517 3458 ··· 3541 3480 3542 3481 nv_connector = nouveau_encoder_connector_get(nv_encoder); 3543 3482 nv_encoder->crtc = encoder->crtc; 3483 + nv50_outp_acquire(nv_encoder); 3544 3484 3545 3485 switch (nv_encoder->dcb->type) { 3546 3486 case DCB_OUTPUT_TMDS: 3547 - if (nv_encoder->dcb->sorconf.link & 1) { 3487 + if (nv_encoder->link & 1) { 3548 3488 proto = 0x1; 3549 3489 /* Only enable 
dual-link if: 3550 3490 * - Need to (i.e. rate > 165MHz) ··· 3603 3541 else 3604 3542 depth = 0x6; 3605 3543 3606 - if (nv_encoder->dcb->sorconf.link & 1) 3544 + if (nv_encoder->link & 1) 3607 3545 proto = 0x8; 3608 3546 else 3609 3547 proto = 0x9; ··· 3662 3600 if (!nv_encoder) 3663 3601 return -ENOMEM; 3664 3602 nv_encoder->dcb = dcbe; 3665 - nv_encoder->or = ffs(dcbe->or) - 1; 3666 3603 nv_encoder->update = nv50_sor_update; 3667 3604 3668 3605 encoder = to_drm_encoder(nv_encoder); ··· 3734 3673 } 3735 3674 3736 3675 nv_encoder->crtc = NULL; 3676 + nv50_outp_release(nv_encoder); 3737 3677 } 3738 3678 3739 3679 static void ··· 3748 3686 u8 owner = 1 << nv_crtc->index; 3749 3687 u8 proto, depth; 3750 3688 u32 *push; 3689 + 3690 + nv50_outp_acquire(nv_encoder); 3751 3691 3752 3692 nv_connector = nouveau_encoder_connector_get(nv_encoder); 3753 3693 switch (nv_connector->base.display_info.bpc) { ··· 3838 3774 if (!nv_encoder) 3839 3775 return -ENOMEM; 3840 3776 nv_encoder->dcb = dcbe; 3841 - nv_encoder->or = ffs(dcbe->or) - 1; 3842 3777 nv_encoder->i2c = ddc; 3843 3778 nv_encoder->aux = aux; 3844 3779
-5
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
··· 24 24 #include "dp.h" 25 25 #include "conn.h" 26 26 #include "ior.h" 27 - #include "nv50.h" 28 27 29 28 #include <subdev/bios.h> 30 29 #include <subdev/bios/init.h> ··· 350 351 static int 351 352 nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) 352 353 { 353 - struct nv50_disp *disp = nv50_disp(dp->outp.disp); 354 354 struct nvkm_ior *ior = dp->outp.ior; 355 355 const u8 sink_nr = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT; 356 356 const u8 sink_bw = dp->dpcd[DPCD_RC01_MAX_LINK_RATE]; ··· 358 360 const struct dp_rates *failsafe = NULL, *cfg; 359 361 int ret = -EINVAL; 360 362 u8 pwr; 361 - 362 - if (!dp->outp.info.location && disp->func->sor.magic) 363 - disp->func->sor.magic(&dp->outp); 364 363 365 364 /* Find the lowest configuration of the OR that can support 366 365 * the required link rate.
+1 -3
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
··· 306 306 307 307 if (nvkm_output_dp_train(outp, pclk)) 308 308 OUTP_ERR(outp, "link not trained before attach"); 309 - } else { 310 - if (disp->func->sor.magic) 311 - disp->func->sor.magic(outp); 312 309 } 313 310 314 311 exec_clkcmp(disp, head, 0, pclk, &conf); ··· 374 377 nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head->id); 375 378 gf119_disp_intr_unk2_0(disp, head->id); 376 379 } 380 + nvkm_outp_route(&disp->base); 377 381 list_for_each_entry(head, &disp->base.head, head) { 378 382 if (!(mask[head->id] & 0x00010000)) 379 383 continue;
+1 -3
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
··· 35 35 .root = &gm200_disp_root_oclass, 36 36 .head.new = gf119_head_new, 37 37 .dac = { .nr = 3, .new = gf119_dac_new }, 38 - .sor.nr = 4, 39 - .sor.new = gm200_sor_new, 40 - .sor.magic = gm200_sor_magic, 38 + .sor = { .nr = 4, .new = gm200_sor_new }, 41 39 }; 42 40 43 41 int
+1 -3
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
··· 34 34 .super = gf119_disp_super, 35 35 .root = &gp100_disp_root_oclass, 36 36 .head.new = gf119_head_new, 37 - .sor.nr = 4, 38 - .sor.new = gm200_sor_new, 39 - .sor.magic = gm200_sor_magic, 37 + .sor = { .nr = 4, .new = gm200_sor_new }, 40 38 }; 41 39 42 40 int
+1 -3
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
··· 60 60 .super = gf119_disp_super, 61 61 .root = &gp102_disp_root_oclass, 62 62 .head.new = gf119_head_new, 63 - .sor.nr = 4, 64 - .sor.new = gm200_sor_new, 65 - .sor.magic = gm200_sor_magic, 63 + .sor = { .nr = 4, .new = gm200_sor_new }, 66 64 }; 67 65 68 66 int
+6
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
··· 17 17 struct list_head head; 18 18 19 19 struct nvkm_ior_state { 20 + struct nvkm_outp *outp; 20 21 unsigned rgdiv; 21 22 unsigned proto_evo:4; 22 23 enum nvkm_ior_proto { ··· 41 40 }; 42 41 43 42 struct nvkm_ior_func { 43 + struct { 44 + int (*get)(struct nvkm_outp *, int *link); 45 + void (*set)(struct nvkm_outp *, struct nvkm_ior *); 46 + } route; 47 + 44 48 void (*state)(struct nvkm_ior *, struct nvkm_ior_state *); 45 49 void (*power)(struct nvkm_ior *, bool normal, bool pu, 46 50 bool data, bool vsync, bool hsync);
+1
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
··· 661 661 continue; 662 662 nv50_disp_intr_unk20_0(disp, head->id); 663 663 } 664 + nvkm_outp_route(&disp->base); 664 665 list_for_each_entry(head, &disp->base.head, head) { 665 666 if (!(super & (0x00000200 << head->id))) 666 667 continue;
-1
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
··· 53 53 struct { 54 54 int nr; 55 55 int (*new)(struct nvkm_disp *, int id); 56 - void (*magic)(struct nvkm_output *); 57 56 } sor; 58 57 59 58 struct {
+125 -6
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
··· 28 28 #include <subdev/bios/dcb.h> 29 29 #include <subdev/i2c.h> 30 30 31 + void 32 + nvkm_outp_route(struct nvkm_disp *disp) 33 + { 34 + struct nvkm_outp *outp; 35 + struct nvkm_ior *ior; 36 + 37 + list_for_each_entry(ior, &disp->ior, head) { 38 + if ((outp = ior->arm.outp) && ior->arm.outp != ior->asy.outp) { 39 + OUTP_DBG(outp, "release %s", ior->name); 40 + if (ior->func->route.set) 41 + ior->func->route.set(outp, NULL); 42 + ior->arm.outp = NULL; 43 + } 44 + } 45 + 46 + list_for_each_entry(ior, &disp->ior, head) { 47 + if ((outp = ior->asy.outp)) { 48 + OUTP_DBG(outp, "acquire %s", ior->name); 49 + if (ior->asy.outp != ior->arm.outp) { 50 + if (ior->func->route.set) 51 + ior->func->route.set(outp, ior); 52 + ior->arm.outp = ior->asy.outp; 53 + } 54 + } 55 + } 56 + } 57 + 31 58 static enum nvkm_ior_proto 32 - nvkm_outp_xlat(struct nvkm_output *outp, enum nvkm_ior_type *type) 59 + nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type) 33 60 { 34 61 switch (outp->info.location) { 35 62 case 0: ··· 85 58 } 86 59 87 60 void 61 + nvkm_outp_release(struct nvkm_outp *outp, u8 user) 62 + { 63 + struct nvkm_ior *ior = outp->ior; 64 + OUTP_TRACE(outp, "release %02x &= %02x %p", outp->acquired, ~user, ior); 65 + if (ior) { 66 + outp->acquired &= ~user; 67 + if (!outp->acquired) { 68 + outp->ior->asy.outp = NULL; 69 + outp->ior = NULL; 70 + } 71 + } 72 + } 73 + 74 + static inline int 75 + nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior) 76 + { 77 + outp->ior = ior; 78 + outp->ior->asy.outp = outp; 79 + outp->ior->asy.link = outp->info.sorconf.link; 80 + outp->acquired |= user; 81 + return 0; 82 + } 83 + 84 + int 85 + nvkm_outp_acquire(struct nvkm_outp *outp, u8 user) 86 + { 87 + struct nvkm_ior *ior = outp->ior; 88 + enum nvkm_ior_proto proto; 89 + enum nvkm_ior_type type; 90 + 91 + OUTP_TRACE(outp, "acquire %02x |= %02x %p", outp->acquired, user, ior); 92 + if (ior) { 93 + outp->acquired |= user; 94 + return 0; 95 + } 96 + 97 + /* 
Lookup a compatible, and unused, OR to assign to the device. */ 98 + proto = nvkm_outp_xlat(outp, &type); 99 + if (proto == UNKNOWN) 100 + return -ENOSYS; 101 + 102 + /* First preference is to reuse the OR that is currently armed 103 + * on HW, if any, in order to prevent unnecessary switching. 104 + */ 105 + list_for_each_entry(ior, &outp->disp->ior, head) { 106 + if (!ior->asy.outp && ior->arm.outp == outp) 107 + return nvkm_outp_acquire_ior(outp, user, ior); 108 + } 109 + 110 + /* Failing that, a completely unused OR is the next best thing. */ 111 + list_for_each_entry(ior, &outp->disp->ior, head) { 112 + if (!ior->asy.outp && ior->type == type && !ior->arm.outp && 113 + ior->id == __ffs(outp->info.or)) 114 + return nvkm_outp_acquire_ior(outp, user, ior); 115 + } 116 + 117 + /* Last resort is to assign an OR that's already active on HW, 118 + * but will be released during the next modeset. 119 + */ 120 + list_for_each_entry(ior, &outp->disp->ior, head) { 121 + if (!ior->asy.outp && ior->type == type && 122 + ior->id == __ffs(outp->info.or)) 123 + return nvkm_outp_acquire_ior(outp, user, ior); 124 + } 125 + 126 + return -ENOSPC; 127 + } 128 + 129 + void 88 130 nvkm_outp_fini(struct nvkm_outp *outp) 89 131 { 90 132 if (outp->func->fini) ··· 161 65 } 162 66 163 67 static void 164 - nvkm_outp_init_route(struct nvkm_output *outp) 68 + nvkm_outp_init_route(struct nvkm_outp *outp) 165 69 { 166 70 struct nvkm_disp *disp = outp->disp; 167 71 enum nvkm_ior_proto proto; 168 72 enum nvkm_ior_type type; 169 73 struct nvkm_ior *ior; 170 - int id; 74 + int id, link; 171 75 76 + /* Find any OR from the class that is able to support this device. */ 172 77 proto = nvkm_outp_xlat(outp, &type); 173 78 if (proto == UNKNOWN) 174 79 return; 175 80 81 + ior = nvkm_ior_find(disp, type, -1); 82 + if (!ior) { 83 + WARN_ON(1); 84 + return; 85 + } 86 + 176 87 /* Determine the specific OR, if any, this device is attached to. 
*/ 177 - if (1) { 88 + if (ior->func->route.get) { 89 + id = ior->func->route.get(outp, &link); 90 + if (id < 0) { 91 + OUTP_DBG(outp, "no route"); 92 + return; 93 + } 94 + } else { 178 95 /* Prior to DCB 4.1, this is hardwired like so. */ 179 - id = ffs(outp->info.or) - 1; 96 + id = ffs(outp->info.or) - 1; 97 + link = (ior->type == SOR) ? outp->info.sorconf.link : 0; 180 98 } 181 99 182 100 ior = nvkm_ior_find(disp, type, id); ··· 199 89 return; 200 90 } 201 91 202 - outp->ior = ior; 92 + /* Determine if the OR is already configured for this device. */ 93 + ior->func->state(ior, &ior->arm); 94 + if (!ior->arm.head || ior->arm.proto != proto) { 95 + OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head, 96 + ior->arm.proto, proto); 97 + return; 98 + } 99 + 100 + OUTP_DBG(outp, "on %s link %x", ior->name, ior->arm.link); 101 + ior->arm.outp = outp; 203 102 } 204 103 205 104 void
+6 -2
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
··· 18 18 struct nvkm_conn *conn; 19 19 20 20 /* Assembly state. */ 21 + #define NVKM_OUTP_PRIV 1 22 + #define NVKM_OUTP_USER 2 23 + u8 acquired:2; 21 24 struct nvkm_ior *ior; 22 25 }; 23 26 ··· 31 28 void nvkm_outp_del(struct nvkm_outp **); 32 29 void nvkm_outp_init(struct nvkm_outp *); 33 30 void nvkm_outp_fini(struct nvkm_outp *); 31 + int nvkm_outp_acquire(struct nvkm_outp *, u8 user); 32 + void nvkm_outp_release(struct nvkm_outp *, u8 user); 33 + void nvkm_outp_route(struct nvkm_disp *); 34 34 35 35 struct nvkm_outp_func { 36 36 void *(*dtor)(struct nvkm_outp *); ··· 44 38 #define nvkm_output nvkm_outp 45 39 #define nvkm_output_func nvkm_outp_func 46 40 #define nvkm_output_new_ nvkm_outp_new_ 47 - 48 - void gm200_sor_magic(struct nvkm_output *outp); 49 41 50 42 #define OUTP_MSG(o,l,f,a...) do { \ 51 43 struct nvkm_outp *_outp = (o); \
+22
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
··· 94 94 } 95 95 96 96 switch (mthd * !!outp) { 97 + case NV50_DISP_MTHD_V1_ACQUIRE: { 98 + union { 99 + struct nv50_disp_acquire_v0 v0; 100 + } *args = data; 101 + int ret = -ENOSYS; 102 + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 103 + ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER); 104 + if (ret == 0) { 105 + args->v0.or = outp->ior->id; 106 + args->v0.link = outp->ior->asy.link; 107 + } 108 + } 109 + return ret; 110 + } 111 + break; 112 + case NV50_DISP_MTHD_V1_RELEASE: 113 + nvkm_outp_release(outp, NVKM_OUTP_USER); 114 + return 0; 97 115 case NV50_DISP_MTHD_V1_DAC_LOAD: { 98 116 union { 99 117 struct nv50_disp_dac_load_v0 v0; ··· 120 102 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 121 103 if (args->v0.data & 0xfff00000) 122 104 return -EINVAL; 105 + ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV); 106 + if (ret) 107 + return ret; 123 108 ret = outp->ior->func->sense(outp->ior, args->v0.data); 109 + nvkm_outp_release(outp, NVKM_OUTP_PRIV); 124 110 if (ret < 0) 125 111 return ret; 126 112 args->v0.load = ret;
+42 -7
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
··· 45 45 nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift)); 46 46 } 47 47 48 - void 49 - gm200_sor_magic(struct nvkm_output *outp) 48 + static void 49 + gm200_sor_route_set(struct nvkm_outp *outp, struct nvkm_ior *ior) 50 50 { 51 51 struct nvkm_device *device = outp->disp->engine.subdev.device; 52 - const u32 soff = outp->or * 0x100; 53 - const u32 data = outp->or + 1; 54 - if (outp->info.sorconf.link & 1) 55 - nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data); 52 + const u32 moff = __ffs(outp->info.or) * 0x100; 53 + const u32 sor = ior ? ior->id + 1 : 0; 54 + u32 link = ior ? (ior->asy.link == 2) : 0; 55 + 56 + if (outp->info.sorconf.link & 1) { 57 + nvkm_mask(device, 0x612308 + moff, 0x0000001f, link << 4 | sor); 58 + link++; 59 + } 60 + 56 61 if (outp->info.sorconf.link & 2) 57 - nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data); 62 + nvkm_mask(device, 0x612388 + moff, 0x0000001f, link << 4 | sor); 63 + } 64 + 65 + static int 66 + gm200_sor_route_get(struct nvkm_outp *outp, int *link) 67 + { 68 + struct nvkm_device *device = outp->disp->engine.subdev.device; 69 + const int sublinks = outp->info.sorconf.link; 70 + int lnk[2], sor[2], m, s; 71 + 72 + for (*link = 0, m = __ffs(outp->info.or) * 2, s = 0; s < 2; m++, s++) { 73 + if (sublinks & BIT(s)) { 74 + u32 data = nvkm_rd32(device, 0x612308 + (m * 0x80)); 75 + lnk[s] = (data & 0x00000010) >> 4; 76 + sor[s] = (data & 0x0000000f); 77 + if (!sor[s]) 78 + return -1; 79 + *link |= lnk[s]; 80 + } 81 + } 82 + 83 + if (sublinks == 3) { 84 + if (sor[0] != sor[1] || WARN_ON(lnk[0] || !lnk[1])) 85 + return -1; 86 + } 87 + 88 + return ((sublinks & 1) ? sor[0] : sor[1]) - 1; 58 89 } 59 90 60 91 static const struct nvkm_ior_func 61 92 gm200_sor = { 93 + .route = { 94 + .get = gm200_sor_route_get, 95 + .set = gm200_sor_route_set, 96 + }, 62 97 .state = gf119_sor_state, 63 98 .power = nv50_sor_power, 64 99 .hdmi = {