Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nouveau/nvif: simplify and tidy library interfaces

A variety of tweaks to the NVIF library interfaces, mostly ripping out
things that turned out to be not so useful.

- Removed refcounting from nvif_object; callers are now expected to
manage object lifetimes correctly themselves.
- nvif_client is directly reachable from anything derived from nvif_object,
removing the need for heuristics to locate it
- _new() versions of interfaces, which allocate memory for the object
they construct, have been removed. The vast majority of callers used
the embedded _init() interfaces.
- No longer storing constructor arguments (and the data returned from
nvkm) inside nvif_object; that data is more or less unused and just
wastes memory.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

+364 -573
+1 -1
drivers/gpu/drm/nouveau/dispnv04/arb.c
··· 198 198 int *burst, int *lwm) 199 199 { 200 200 struct nouveau_drm *drm = nouveau_drm(dev); 201 - struct nvif_device *device = &nouveau_drm(dev)->device; 201 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 202 202 struct nv_fifo_info fifo_data; 203 203 struct nv_sim_state sim_data; 204 204 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
+8 -7
drivers/gpu/drm/nouveau/dispnv04/dac.c
··· 65 65 66 66 static int sample_load_twice(struct drm_device *dev, bool sense[2]) 67 67 { 68 - struct nvif_device *device = &nouveau_drm(dev)->device; 68 + struct nouveau_drm *drm = nouveau_drm(dev); 69 + struct nvif_object *device = &drm->device.object; 69 70 int i; 70 71 71 72 for (i = 0; i < 2; i++) { ··· 80 79 * use a 10ms timeout (guards against crtc being inactive, in 81 80 * which case blank state would never change) 82 81 */ 83 - if (nvif_msec(device, 10, 82 + if (nvif_msec(&drm->device, 10, 84 83 if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) 85 84 break; 86 85 ) < 0) 87 86 return -EBUSY; 88 87 89 - if (nvif_msec(device, 10, 88 + if (nvif_msec(&drm->device, 10, 90 89 if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) 91 90 break; 92 91 ) < 0) 93 92 return -EBUSY; 94 93 95 - if (nvif_msec(device, 10, 94 + if (nvif_msec(&drm->device, 10, 96 95 if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) 97 96 break; 98 97 ) < 0) ··· 133 132 struct drm_connector *connector) 134 133 { 135 134 struct drm_device *dev = encoder->dev; 136 - struct nvif_device *device = &nouveau_drm(dev)->device; 135 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 137 136 struct nouveau_drm *drm = nouveau_drm(dev); 138 137 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; 139 138 uint8_t saved_palette0[3], saved_palette_mask; ··· 236 235 { 237 236 struct drm_device *dev = encoder->dev; 238 237 struct nouveau_drm *drm = nouveau_drm(dev); 239 - struct nvif_device *device = &nouveau_drm(dev)->device; 240 - struct nvkm_gpio *gpio = nvxx_gpio(device); 238 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 239 + struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); 241 240 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; 242 241 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); 243 242 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
+1 -1
drivers/gpu/drm/nouveau/dispnv04/dfp.c
··· 281 281 struct drm_display_mode *adjusted_mode) 282 282 { 283 283 struct drm_device *dev = encoder->dev; 284 - struct nvif_device *device = &nouveau_drm(dev)->device; 284 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 285 285 struct nouveau_drm *drm = nouveau_drm(dev); 286 286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 287 287 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+2 -2
drivers/gpu/drm/nouveau/dispnv04/disp.c
··· 47 47 if (!disp) 48 48 return -ENOMEM; 49 49 50 - nvif_object_map(nvif_object(&drm->device)); 50 + nvif_object_map(&drm->device.object); 51 51 52 52 nouveau_display(dev)->priv = disp; 53 53 nouveau_display(dev)->dtor = nv04_display_destroy; ··· 153 153 nouveau_display(dev)->priv = NULL; 154 154 kfree(disp); 155 155 156 - nvif_object_unmap(nvif_object(&drm->device)); 156 + nvif_object_unmap(&drm->device.object); 157 157 } 158 158 159 159 int
+11 -11
drivers/gpu/drm/nouveau/dispnv04/hw.c
··· 165 165 struct nvkm_pll_vals *pllvals) 166 166 { 167 167 struct nouveau_drm *drm = nouveau_drm(dev); 168 - struct nvif_device *device = &drm->device; 169 - struct nvkm_bios *bios = nvxx_bios(device); 168 + struct nvif_object *device = &drm->device.object; 169 + struct nvkm_bios *bios = nvxx_bios(&drm->device); 170 170 uint32_t reg1, pll1, pll2 = 0; 171 171 struct nvbios_pll pll_lim; 172 172 int ret; ··· 660 660 struct nv04_mode_state *state) 661 661 { 662 662 struct nouveau_drm *drm = nouveau_drm(dev); 663 - struct nvif_device *device = &drm->device; 663 + struct nvif_object *device = &drm->device.object; 664 664 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 665 665 uint32_t reg900; 666 666 int i; ··· 677 677 nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); 678 678 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); 679 679 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); 680 - nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1); 681 - nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1); 682 - nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1); 683 - nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1); 680 + nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1); 681 + nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1); 682 + nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1); 683 + nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1); 684 684 nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); 685 685 686 686 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); ··· 740 740 if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) { 741 741 /* Not waiting for vertical retrace before modifying 742 742 CRE_53/CRE_54 causes lockups. 
*/ 743 - nvif_msec(device, 650, 743 + nvif_msec(&drm->device, 650, 744 744 if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) 745 745 break; 746 746 ); 747 - nvif_msec(device, 650, 747 + nvif_msec(&drm->device, 650, 748 748 if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) 749 749 break; 750 750 ); ··· 770 770 nv_save_state_palette(struct drm_device *dev, int head, 771 771 struct nv04_mode_state *state) 772 772 { 773 - struct nvif_device *device = &nouveau_drm(dev)->device; 773 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 774 774 int head_offset = head * NV_PRMDIO_SIZE, i; 775 775 776 776 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, ··· 789 789 nouveau_hw_load_state_palette(struct drm_device *dev, int head, 790 790 struct nv04_mode_state *state) 791 791 { 792 - struct nvif_device *device = &nouveau_drm(dev)->device; 792 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 793 793 int head_offset = head * NV_PRMDIO_SIZE, i; 794 794 795 795 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
+13 -13
drivers/gpu/drm/nouveau/dispnv04/hw.h
··· 60 60 static inline uint32_t NVReadCRTC(struct drm_device *dev, 61 61 int head, uint32_t reg) 62 62 { 63 - struct nvif_device *device = &nouveau_drm(dev)->device; 63 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 64 64 uint32_t val; 65 65 if (head) 66 66 reg += NV_PCRTC0_SIZE; ··· 71 71 static inline void NVWriteCRTC(struct drm_device *dev, 72 72 int head, uint32_t reg, uint32_t val) 73 73 { 74 - struct nvif_device *device = &nouveau_drm(dev)->device; 74 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 75 75 if (head) 76 76 reg += NV_PCRTC0_SIZE; 77 77 nvif_wr32(device, reg, val); ··· 80 80 static inline uint32_t NVReadRAMDAC(struct drm_device *dev, 81 81 int head, uint32_t reg) 82 82 { 83 - struct nvif_device *device = &nouveau_drm(dev)->device; 83 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 84 84 uint32_t val; 85 85 if (head) 86 86 reg += NV_PRAMDAC0_SIZE; ··· 91 91 static inline void NVWriteRAMDAC(struct drm_device *dev, 92 92 int head, uint32_t reg, uint32_t val) 93 93 { 94 - struct nvif_device *device = &nouveau_drm(dev)->device; 94 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 95 95 if (head) 96 96 reg += NV_PRAMDAC0_SIZE; 97 97 nvif_wr32(device, reg, val); ··· 120 120 static inline void NVWriteVgaCrtc(struct drm_device *dev, 121 121 int head, uint8_t index, uint8_t value) 122 122 { 123 - struct nvif_device *device = &nouveau_drm(dev)->device; 123 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 124 124 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 125 125 nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); 126 126 } ··· 128 128 static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, 129 129 int head, uint8_t index) 130 130 { 131 - struct nvif_device *device = &nouveau_drm(dev)->device; 131 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 132 132 uint8_t val; 133 133 nvif_wr08(device, 
NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 134 134 val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); ··· 165 165 static inline uint8_t NVReadPRMVIO(struct drm_device *dev, 166 166 int head, uint32_t reg) 167 167 { 168 - struct nvif_device *device = &nouveau_drm(dev)->device; 168 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 169 169 struct nouveau_drm *drm = nouveau_drm(dev); 170 170 uint8_t val; 171 171 ··· 181 181 static inline void NVWritePRMVIO(struct drm_device *dev, 182 182 int head, uint32_t reg, uint8_t value) 183 183 { 184 - struct nvif_device *device = &nouveau_drm(dev)->device; 184 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 185 185 struct nouveau_drm *drm = nouveau_drm(dev); 186 186 187 187 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call ··· 194 194 195 195 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) 196 196 { 197 - struct nvif_device *device = &nouveau_drm(dev)->device; 197 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 198 198 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 199 199 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 
0 : 0x20); 200 200 } 201 201 202 202 static inline bool NVGetEnablePalette(struct drm_device *dev, int head) 203 203 { 204 - struct nvif_device *device = &nouveau_drm(dev)->device; 204 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 205 205 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 206 206 return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); 207 207 } ··· 209 209 static inline void NVWriteVgaAttr(struct drm_device *dev, 210 210 int head, uint8_t index, uint8_t value) 211 211 { 212 - struct nvif_device *device = &nouveau_drm(dev)->device; 212 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 213 213 if (NVGetEnablePalette(dev, head)) 214 214 index &= ~0x20; 215 215 else ··· 223 223 static inline uint8_t NVReadVgaAttr(struct drm_device *dev, 224 224 int head, uint8_t index) 225 225 { 226 - struct nvif_device *device = &nouveau_drm(dev)->device; 226 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 227 227 uint8_t val; 228 228 if (NVGetEnablePalette(dev, head)) 229 229 index &= ~0x20; ··· 259 259 static inline bool 260 260 nv_heads_tied(struct drm_device *dev) 261 261 { 262 - struct nvif_device *device = &nouveau_drm(dev)->device; 262 + struct nvif_object *device = &nouveau_drm(dev)->device.object; 263 263 struct nouveau_drm *drm = nouveau_drm(dev); 264 264 265 265 if (drm->device.info.chipset == 0x11)
+7 -6
drivers/gpu/drm/nouveau/dispnv04/overlay.c
··· 96 96 uint32_t src_x, uint32_t src_y, 97 97 uint32_t src_w, uint32_t src_h) 98 98 { 99 - struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 99 + struct nouveau_drm *drm = nouveau_drm(plane->dev); 100 + struct nvif_object *dev = &drm->device.object; 100 101 struct nouveau_plane *nv_plane = 101 102 container_of(plane, struct nouveau_plane, base); 102 103 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); ··· 119 118 if (format > 0xffff) 120 119 return -ERANGE; 121 120 122 - if (dev->info.chipset >= 0x30) { 121 + if (drm->device.info.chipset >= 0x30) { 123 122 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) 124 123 return -ERANGE; 125 124 } else { ··· 174 173 static int 175 174 nv10_disable_plane(struct drm_plane *plane) 176 175 { 177 - struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 176 + struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; 178 177 struct nouveau_plane *nv_plane = 179 178 container_of(plane, struct nouveau_plane, base); 180 179 ··· 198 197 static void 199 198 nv10_set_params(struct nouveau_plane *plane) 200 199 { 201 - struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device; 200 + struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object; 202 201 u32 luma = (plane->brightness - 512) << 16 | plane->contrast; 203 202 u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | 204 203 (cos_mul(plane->hue, plane->saturation) & 0xffff); ··· 347 346 uint32_t src_x, uint32_t src_y, 348 347 uint32_t src_w, uint32_t src_h) 349 348 { 350 - struct nvif_device *dev = &nouveau_drm(plane->dev)->device; 349 + struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; 351 350 struct nouveau_plane *nv_plane = 352 351 container_of(plane, struct nouveau_plane, base); 353 352 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); ··· 427 426 static int 428 427 nv04_disable_plane(struct drm_plane *plane) 429 428 { 430 - struct nvif_device *dev = 
&nouveau_drm(plane->dev)->device; 429 + struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; 431 430 struct nouveau_plane *nv_plane = 432 431 container_of(plane, struct nouveau_plane, base); 433 432
+2 -2
drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
··· 131 131 uint32_t val) 132 132 { 133 133 struct nvif_device *device = &nouveau_drm(dev)->device; 134 - nvif_wr32(device, reg, val); 134 + nvif_wr32(&device->object, reg, val); 135 135 } 136 136 137 137 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) 138 138 { 139 139 struct nvif_device *device = &nouveau_drm(dev)->device; 140 - return nvif_rd32(device, reg); 140 + return nvif_rd32(&device->object, reg); 141 141 } 142 142 143 143 static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
+7 -19
drivers/gpu/drm/nouveau/include/nvif/client.h
··· 4 4 #include <nvif/object.h> 5 5 6 6 struct nvif_client { 7 - struct nvif_object base; 8 - struct nvif_object *object; /*XXX: hack for nvif_object() */ 7 + struct nvif_object object; 9 8 const struct nvif_driver *driver; 9 + u8 route; 10 10 bool super; 11 11 }; 12 12 13 - static inline struct nvif_client * 14 - nvif_client(struct nvif_object *object) 15 - { 16 - while (object && object->parent != object) 17 - object = object->parent; 18 - return (void *)object; 19 - } 20 - 21 - int nvif_client_init(void (*dtor)(struct nvif_client *), const char *, 22 - const char *, u64, const char *, const char *, 13 + int nvif_client_init(const char *drv, const char *name, u64 device, 14 + const char *cfg, const char *dbg, 23 15 struct nvif_client *); 24 16 void nvif_client_fini(struct nvif_client *); 25 - int nvif_client_new(const char *, const char *, u64, const char *, 26 - const char *, struct nvif_client **); 27 - void nvif_client_ref(struct nvif_client *, struct nvif_client **); 28 17 int nvif_client_ioctl(struct nvif_client *, void *, u32); 29 18 int nvif_client_suspend(struct nvif_client *); 30 19 int nvif_client_resume(struct nvif_client *); 31 20 32 21 /*XXX*/ 33 22 #include <core/client.h> 34 - #define nvxx_client(a) ({ \ 35 - struct nvif_client *_client = nvif_client(nvif_object(a)); \ 36 - nvkm_client(_client->base.priv); \ 23 + #define nvxx_client(a) ({ \ 24 + struct nvif_client *_client = (a); \ 25 + nvkm_client(_client->object.priv); \ 37 26 }) 38 - 39 27 #endif
+7 -17
drivers/gpu/drm/nouveau/include/nvif/device.h
··· 5 5 #include <nvif/class.h> 6 6 7 7 struct nvif_device { 8 - struct nvif_object base; 9 - struct nvif_object *object; /*XXX: hack for nvif_object() */ 8 + struct nvif_object object; 10 9 struct nv_device_info_v0 info; 11 10 }; 12 11 13 - static inline struct nvif_device * 14 - nvif_device(struct nvif_object *object) 15 - { 16 - while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ ) 17 - object = object->parent; 18 - return (void *)object; 19 - } 20 - 21 - int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *), 22 - u32 handle, u32 oclass, void *, u32, 12 + int nvif_device_init(struct nvif_object *, u32 handle, u32 oclass, void *, u32, 23 13 struct nvif_device *); 24 14 void nvif_device_fini(struct nvif_device *); 25 - int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass, 26 - void *, u32, struct nvif_device **); 27 - void nvif_device_ref(struct nvif_device *, struct nvif_device **); 28 15 u64 nvif_device_time(struct nvif_device *); 29 16 30 17 /* Delay based on GPU time (ie. PTIMER). ··· 46 59 #include <subdev/timer.h> 47 60 #include <subdev/therm.h> 48 61 49 - #define nvxx_device(a) nv_device(nvxx_object((a))) 62 + #define nvxx_device(a) ({ \ 63 + struct nvif_device *_device = (a); \ 64 + nv_device(_device->object.priv); \ 65 + }) 50 66 #define nvxx_bios(a) nvkm_bios(nvxx_device(a)) 51 67 #define nvxx_fb(a) nvkm_fb(nvxx_device(a)) 52 68 #define nvxx_mmu(a) nvkm_mmu(nvxx_device(a)) ··· 67 77 68 78 #define nvxx_fifo(a) nvkm_fifo(nvxx_device(a)) 69 79 #define nvxx_fifo_chan(a) ((struct nvkm_fifo_chan *)nvxx_object(a)) 70 - #define nvxx_gr(a) ((struct nvkm_gr *)nvkm_engine(nvxx_object(a), NVDEV_ENGINE_GR)) 80 + #define nvxx_gr(a) nvkm_gr(nvxx_device(a)) 71 81 #endif
+3 -9
drivers/gpu/drm/nouveau/include/nvif/notify.h
··· 23 23 struct work_struct work; 24 24 }; 25 25 26 - int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *), 27 - int (*func)(struct nvif_notify *), bool work, u8 type, 28 - void *data, u32 size, u32 reply, struct nvif_notify *); 26 + int nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *), 27 + bool work, u8 type, void *data, u32 size, u32 reply, 28 + struct nvif_notify *); 29 29 int nvif_notify_fini(struct nvif_notify *); 30 30 int nvif_notify_get(struct nvif_notify *); 31 31 int nvif_notify_put(struct nvif_notify *); 32 32 int nvif_notify(const void *, u32, const void *, u32); 33 - 34 - int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *), 35 - bool work, u8 type, void *data, u32 size, u32 reply, 36 - struct nvif_notify **); 37 - void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **); 38 - 39 33 #endif
+25 -32
drivers/gpu/drm/nouveau/include/nvif/object.h
··· 4 4 #include <nvif/os.h> 5 5 6 6 struct nvif_object { 7 + struct nvif_client *client; 7 8 struct nvif_object *parent; 8 - struct nvif_object *object; /*XXX: hack for nvif_object() */ 9 - struct kref refcount; 10 9 u32 handle; 11 10 u32 oclass; 12 - void *data; 13 - u32 size; 14 11 void *priv; /*XXX: hack */ 15 - void (*dtor)(struct nvif_object *); 16 12 struct { 17 13 void __iomem *ptr; 18 14 u32 size; 19 15 } map; 20 16 }; 21 17 22 - int nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *), 23 - u32 handle, u32 oclass, void *, u32, 18 + int nvif_object_init(struct nvif_object *, u32 handle, u32 oclass, void *, u32, 24 19 struct nvif_object *); 25 20 void nvif_object_fini(struct nvif_object *); 26 - int nvif_object_new(struct nvif_object *, u32 handle, u32 oclass, 27 - void *, u32, struct nvif_object **); 28 - void nvif_object_ref(struct nvif_object *, struct nvif_object **); 29 21 int nvif_object_ioctl(struct nvif_object *, void *, u32, void **); 30 22 int nvif_object_sclass(struct nvif_object *, u32 *, int); 31 23 u32 nvif_object_rd(struct nvif_object *, int, u64); ··· 28 36 29 37 #define nvif_object(a) (a)->object 30 38 31 - #define ioread8_native ioread8 32 - #define iowrite8_native iowrite8 33 - #define nvif_rd(a,b,c) ({ \ 34 - struct nvif_object *_object = nvif_object(a); \ 39 + #define nvif_rd(a,f,b,c) ({ \ 40 + struct nvif_object *_object = (a); \ 35 41 u32 _data; \ 36 42 if (likely(_object->map.ptr)) \ 37 - _data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c)); \ 43 + _data = f((u8 __iomem *)_object->map.ptr + (c)); \ 38 44 else \ 39 - _data = nvif_object_rd(_object, (b) / 8, (c)); \ 45 + _data = nvif_object_rd(_object, (b), (c)); \ 40 46 _data; \ 41 47 }) 42 - #define nvif_wr(a,b,c,d) ({ \ 43 - struct nvif_object *_object = nvif_object(a); \ 48 + #define nvif_wr(a,f,b,c,d) ({ \ 49 + struct nvif_object *_object = (a); \ 44 50 if (likely(_object->map.ptr)) \ 45 - iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + 
(c)); \ 51 + f((d), (u8 __iomem *)_object->map.ptr + (c)); \ 46 52 else \ 47 - nvif_object_wr(_object, (b) / 8, (c), (d)); \ 53 + nvif_object_wr(_object, (b), (c), (d)); \ 48 54 }) 49 - #define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; }) 50 - #define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; }) 51 - #define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; }) 52 - #define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c)) 53 - #define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c)) 54 - #define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c)) 55 + #define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); }) 56 + #define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); }) 57 + #define nvif_rd32(a,b) ({ ((u32)nvif_rd((a), ioread32_native, 4, (b))); }) 58 + #define nvif_wr08(a,b,c) nvif_wr((a), iowrite8, 1, (b), (u8)(c)) 59 + #define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c)) 60 + #define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c)) 55 61 #define nvif_mask(a,b,c,d) ({ \ 56 - u32 _v = nvif_rd32(nvif_object(a), (b)); \ 57 - nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d)); \ 58 - _v; \ 62 + struct nvif_object *__object = (a); \ 63 + u32 _addr = (b), _data = nvif_rd32(__object, _addr); \ 64 + nvif_wr32(__object, _addr, (_data & ~(c)) | (d)); \ 65 + _data; \ 59 66 }) 60 67 61 - #define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d)) 68 + #define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d)) 62 69 63 70 /*XXX*/ 64 71 #include <core/object.h> 65 - #define nvxx_object(a) ((struct nvkm_object *)nvif_object(a)->priv) 66 - 72 + #define nvxx_object(a) ({ \ 73 + struct nvif_object *_object = (a); \ 74 + (struct nvkm_object *)_object->priv; \ 75 + }) 67 76 #endif
+54 -80
drivers/gpu/drm/nouveau/nouveau_abi16.c
··· 51 51 * device (ie. the one that belongs to the fd it 52 52 * opened) 53 53 */ 54 - if (nvif_device_init(&cli->base.base, NULL, 54 + if (nvif_device_init(&cli->base.object, 55 55 NOUVEAU_ABI16_DEVICE, NV_DEVICE, 56 56 &args, sizeof(args), 57 57 &abi16->device) == 0) ··· 69 69 int 70 70 nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) 71 71 { 72 - struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); 72 + struct nouveau_cli *cli = (void *)abi16->device.object.client; 73 73 mutex_unlock(&cli->mutex); 74 74 return ret; 75 75 } ··· 100 100 nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan, 101 101 struct nouveau_abi16_ntfy *ntfy) 102 102 { 103 + nvif_object_fini(&ntfy->object); 103 104 nvkm_mm_free(&chan->heap, &ntfy->node); 104 105 list_del(&ntfy->head); 105 106 kfree(ntfy); ··· 133 132 134 133 /* destroy channel object, all children will be killed too */ 135 134 if (chan->chan) { 136 - abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff)); 135 + abi16->handles &= ~(1ULL << (chan->chan->user.handle & 0xffff)); 137 136 nouveau_channel_del(&chan->chan); 138 137 } 139 138 ··· 144 143 void 145 144 nouveau_abi16_fini(struct nouveau_abi16 *abi16) 146 145 { 147 - struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); 146 + struct nouveau_cli *cli = (void *)abi16->device.object.client; 148 147 struct nouveau_abi16_chan *chan, *temp; 149 148 150 149 /* cleanup channels */ ··· 337 336 struct nouveau_abi16_chan *chan; 338 337 339 338 list_for_each_entry(chan, &abi16->channels, head) { 340 - if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel)) 339 + if (chan->chan->user.handle == NOUVEAU_ABI16_CHAN(channel)) 341 340 return chan; 342 341 } 343 342 ··· 365 364 nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) 366 365 { 367 366 struct drm_nouveau_grobj_alloc *init = data; 368 - struct { 369 - struct nvif_ioctl_v0 ioctl; 370 - struct nvif_ioctl_new_v0 new; 371 - } args = { 372 - .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY, 
373 - .ioctl.type = NVIF_IOCTL_V0_NEW, 374 - .ioctl.path_nr = 3, 375 - .ioctl.path[2] = NOUVEAU_ABI16_CLIENT, 376 - .ioctl.path[1] = NOUVEAU_ABI16_DEVICE, 377 - .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel), 378 - .new.route = NVDRM_OBJECT_ABI16, 379 - .new.handle = init->handle, 380 - .new.oclass = init->class, 381 - }; 382 367 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 368 + struct nouveau_abi16_chan *chan; 369 + struct nouveau_abi16_ntfy *ntfy; 383 370 struct nouveau_drm *drm = nouveau_drm(dev); 384 371 struct nvif_client *client; 385 372 int ret; ··· 377 388 378 389 if (init->handle == ~0) 379 390 return nouveau_abi16_put(abi16, -EINVAL); 380 - client = nvif_client(nvif_object(&abi16->device)); 391 + client = abi16->device.object.client; 381 392 382 393 /* compatibility with userspace that assumes 506e for all chipsets */ 383 394 if (init->class == 0x506e) { ··· 386 397 return nouveau_abi16_put(abi16, 0); 387 398 } 388 399 389 - ret = nvif_client_ioctl(client, &args, sizeof(args)); 400 + chan = nouveau_abi16_chan(abi16, init->channel); 401 + if (!chan) 402 + return nouveau_abi16_put(abi16, -ENOENT); 403 + 404 + ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL); 405 + if (!ntfy) 406 + return nouveau_abi16_put(abi16, -ENOMEM); 407 + 408 + list_add(&ntfy->head, &chan->notifiers); 409 + 410 + client->route = NVDRM_OBJECT_ABI16; 411 + ret = nvif_object_init(&chan->chan->user, init->handle, init->class, 412 + NULL, 0, &ntfy->object); 413 + client->route = NVDRM_OBJECT_NVIF; 414 + 415 + if (ret) 416 + nouveau_abi16_ntfy_fini(chan, ntfy); 390 417 return nouveau_abi16_put(abi16, ret); 391 418 } 392 419 ··· 410 405 nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) 411 406 { 412 407 struct drm_nouveau_notifierobj_alloc *info = data; 413 - struct { 414 - struct nvif_ioctl_v0 ioctl; 415 - struct nvif_ioctl_new_v0 new; 416 - struct nv_dma_v0 ctxdma; 417 - } args = { 418 - .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY, 419 - .ioctl.type = NVIF_IOCTL_V0_NEW, 
420 - .ioctl.path_nr = 3, 421 - .ioctl.path[2] = NOUVEAU_ABI16_CLIENT, 422 - .ioctl.path[1] = NOUVEAU_ABI16_DEVICE, 423 - .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel), 424 - .new.route = NVDRM_OBJECT_ABI16, 425 - .new.handle = info->handle, 426 - .new.oclass = NV_DMA_IN_MEMORY, 427 - }; 428 408 struct nouveau_drm *drm = nouveau_drm(dev); 429 409 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 430 410 struct nouveau_abi16_chan *chan; 431 411 struct nouveau_abi16_ntfy *ntfy; 432 412 struct nvif_device *device = &abi16->device; 433 413 struct nvif_client *client; 414 + struct nv_dma_v0 args = {}; 434 415 int ret; 435 416 436 417 if (unlikely(!abi16)) ··· 425 434 /* completely unnecessary for these chipsets... */ 426 435 if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI)) 427 436 return nouveau_abi16_put(abi16, -EINVAL); 428 - client = nvif_client(nvif_object(&abi16->device)); 437 + client = abi16->device.object.client; 429 438 430 439 chan = nouveau_abi16_chan(abi16, info->channel); 431 440 if (!chan) ··· 436 445 return nouveau_abi16_put(abi16, -ENOMEM); 437 446 438 447 list_add(&ntfy->head, &chan->notifiers); 439 - ntfy->handle = info->handle; 440 448 441 449 ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1, 442 450 &ntfy->node); 443 451 if (ret) 444 452 goto done; 445 453 446 - args.ctxdma.start = ntfy->node->offset; 447 - args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1; 454 + args.start = ntfy->node->offset; 455 + args.limit = ntfy->node->offset + ntfy->node->length - 1; 448 456 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { 449 - args.ctxdma.target = NV_DMA_V0_TARGET_VM; 450 - args.ctxdma.access = NV_DMA_V0_ACCESS_VM; 451 - args.ctxdma.start += chan->ntfy_vma.offset; 452 - args.ctxdma.limit += chan->ntfy_vma.offset; 457 + args.target = NV_DMA_V0_TARGET_VM; 458 + args.access = NV_DMA_V0_ACCESS_VM; 459 + args.start += chan->ntfy_vma.offset; 460 + args.limit += chan->ntfy_vma.offset; 453 461 } 
else 454 462 if (drm->agp.stat == ENABLED) { 455 - args.ctxdma.target = NV_DMA_V0_TARGET_AGP; 456 - args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; 457 - args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset; 458 - args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset; 459 - client->super = true; 463 + args.target = NV_DMA_V0_TARGET_AGP; 464 + args.access = NV_DMA_V0_ACCESS_RDWR; 465 + args.start += drm->agp.base + chan->ntfy->bo.offset; 466 + args.limit += drm->agp.base + chan->ntfy->bo.offset; 460 467 } else { 461 - args.ctxdma.target = NV_DMA_V0_TARGET_VM; 462 - args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; 463 - args.ctxdma.start += chan->ntfy->bo.offset; 464 - args.ctxdma.limit += chan->ntfy->bo.offset; 468 + args.target = NV_DMA_V0_TARGET_VM; 469 + args.access = NV_DMA_V0_ACCESS_RDWR; 470 + args.start += chan->ntfy->bo.offset; 471 + args.limit += chan->ntfy->bo.offset; 465 472 } 466 473 467 - ret = nvif_client_ioctl(client, &args, sizeof(args)); 474 + client->route = NVDRM_OBJECT_ABI16; 475 + client->super = true; 476 + ret = nvif_object_init(&chan->chan->user, info->handle, 477 + NV_DMA_IN_MEMORY, &args, sizeof(args), 478 + &ntfy->object); 468 479 client->super = false; 480 + client->route = NVDRM_OBJECT_NVIF; 469 481 if (ret) 470 482 goto done; 471 483 472 484 info->offset = ntfy->node->offset; 473 - 474 485 done: 475 486 if (ret) 476 487 nouveau_abi16_ntfy_fini(chan, ntfy); ··· 483 490 nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) 484 491 { 485 492 struct drm_nouveau_gpuobj_free *fini = data; 486 - struct { 487 - struct nvif_ioctl_v0 ioctl; 488 - struct nvif_ioctl_del del; 489 - } args = { 490 - .ioctl.owner = NVDRM_OBJECT_ABI16, 491 - .ioctl.type = NVIF_IOCTL_V0_DEL, 492 - .ioctl.path_nr = 4, 493 - .ioctl.path[3] = NOUVEAU_ABI16_CLIENT, 494 - .ioctl.path[2] = NOUVEAU_ABI16_DEVICE, 495 - .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel), 496 - .ioctl.path[0] = fini->handle, 497 - }; 498 493 struct nouveau_abi16 *abi16 = 
nouveau_abi16_get(file_priv, dev); 499 494 struct nouveau_abi16_chan *chan; 500 495 struct nouveau_abi16_ntfy *ntfy; 501 - struct nvif_client *client; 502 - int ret; 496 + int ret = -ENOENT; 503 497 504 498 if (unlikely(!abi16)) 505 499 return -ENOMEM; 506 500 507 501 chan = nouveau_abi16_chan(abi16, fini->channel); 508 502 if (!chan) 509 - return nouveau_abi16_put(abi16, -ENOENT); 510 - client = nvif_client(nvif_object(&abi16->device)); 503 + return nouveau_abi16_put(abi16, -EINVAL); 511 504 512 505 /* synchronize with the user channel and destroy the gpu object */ 513 506 nouveau_channel_idle(chan->chan); 514 507 515 - ret = nvif_client_ioctl(client, &args, sizeof(args)); 516 - if (ret) 517 - return nouveau_abi16_put(abi16, ret); 518 - 519 - /* cleanup extra state if this object was a notifier */ 520 508 list_for_each_entry(ntfy, &chan->notifiers, head) { 521 - if (ntfy->handle == fini->handle) { 522 - nvkm_mm_free(&chan->heap, &ntfy->node); 523 - list_del(&ntfy->head); 509 + if (ntfy->object.handle == fini->handle) { 510 + nouveau_abi16_ntfy_fini(chan, ntfy); 511 + ret = 0; 524 512 break; 525 513 } 526 514 } 527 515 528 - return nouveau_abi16_put(abi16, 0); 516 + return nouveau_abi16_put(abi16, ret); 529 517 }
+1 -1
drivers/gpu/drm/nouveau/nouveau_abi16.h
··· 13 13 int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); 14 14 15 15 struct nouveau_abi16_ntfy { 16 + struct nvif_object object; 16 17 struct list_head head; 17 18 struct nvkm_mm_node *node; 18 - u32 handle; 19 19 }; 20 20 21 21 struct nouveau_abi16_chan {
+1 -1
drivers/gpu/drm/nouveau/nouveau_agp.c
··· 102 102 nouveau_agp_reset(struct nouveau_drm *drm) 103 103 { 104 104 #if __OS_HAS_AGP 105 - struct nvif_device *device = &drm->device; 105 + struct nvif_object *device = &drm->device.object; 106 106 struct drm_device *dev = drm->dev; 107 107 u32 save[2]; 108 108 int ret;
+11 -11
drivers/gpu/drm/nouveau/nouveau_backlight.c
··· 40 40 nv40_get_intensity(struct backlight_device *bd) 41 41 { 42 42 struct nouveau_drm *drm = bl_get_data(bd); 43 - struct nvif_device *device = &drm->device; 43 + struct nvif_object *device = &drm->device.object; 44 44 int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & 45 45 NV40_PMC_BACKLIGHT_MASK) >> 16; 46 46 ··· 51 51 nv40_set_intensity(struct backlight_device *bd) 52 52 { 53 53 struct nouveau_drm *drm = bl_get_data(bd); 54 - struct nvif_device *device = &drm->device; 54 + struct nvif_object *device = &drm->device.object; 55 55 int val = bd->props.brightness; 56 56 int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); 57 57 ··· 71 71 nv40_backlight_init(struct drm_connector *connector) 72 72 { 73 73 struct nouveau_drm *drm = nouveau_drm(connector->dev); 74 - struct nvif_device *device = &drm->device; 74 + struct nvif_object *device = &drm->device.object; 75 75 struct backlight_properties props; 76 76 struct backlight_device *bd; 77 77 ··· 97 97 { 98 98 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 99 99 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 100 - struct nvif_device *device = &drm->device; 100 + struct nvif_object *device = &drm->device.object; 101 101 int or = nv_encoder->or; 102 102 u32 div = 1025; 103 103 u32 val; ··· 112 112 { 113 113 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 114 114 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 115 - struct nvif_device *device = &drm->device; 115 + struct nvif_object *device = &drm->device.object; 116 116 int or = nv_encoder->or; 117 117 u32 div = 1025; 118 118 u32 val = (bd->props.brightness * div) / 100; ··· 133 133 { 134 134 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 135 135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 136 - struct nvif_device *device = &drm->device; 136 + struct nvif_object *device = &drm->device.object; 137 137 int or = nv_encoder->or; 138 138 u32 div, val; 139 139 ··· 151 151 { 152 152 struct 
nouveau_encoder *nv_encoder = bl_get_data(bd); 153 153 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 154 - struct nvif_device *device = &drm->device; 154 + struct nvif_object *device = &drm->device.object; 155 155 int or = nv_encoder->or; 156 156 u32 div, val; 157 157 ··· 177 177 nv50_backlight_init(struct drm_connector *connector) 178 178 { 179 179 struct nouveau_drm *drm = nouveau_drm(connector->dev); 180 - struct nvif_device *device = &drm->device; 180 + struct nvif_object *device = &drm->device.object; 181 181 struct nouveau_encoder *nv_encoder; 182 182 struct backlight_properties props; 183 183 struct backlight_device *bd; ··· 193 193 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) 194 194 return 0; 195 195 196 - if (device->info.chipset <= 0xa0 || 197 - device->info.chipset == 0xaa || 198 - device->info.chipset == 0xac) 196 + if (drm->device.info.chipset <= 0xa0 || 197 + drm->device.info.chipset == 0xaa || 198 + drm->device.info.chipset == 0xac) 199 199 ops = &nv50_bl_ops; 200 200 else 201 201 ops = &nva3_bl_ops;
+6 -5
drivers/gpu/drm/nouveau/nouveau_bios.c
··· 215 215 */ 216 216 217 217 struct nouveau_drm *drm = nouveau_drm(dev); 218 - struct nvif_device *device = &drm->device; 218 + struct nvif_object *device = &drm->device.object; 219 219 struct nvbios *bios = &drm->vbios; 220 220 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; 221 221 uint32_t sel_clk_binding, sel_clk; ··· 318 318 static int 319 319 get_fp_strap(struct drm_device *dev, struct nvbios *bios) 320 320 { 321 - struct nvif_device *device = &nouveau_drm(dev)->device; 321 + struct nouveau_drm *drm = nouveau_drm(dev); 322 + struct nvif_object *device = &drm->device.object; 322 323 323 324 /* 324 325 * The fp strap is normally dictated by the "User Strap" in ··· 333 332 if (bios->major_version < 5 && bios->data[0x48] & 0x4) 334 333 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; 335 334 336 - if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) 335 + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) 337 336 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; 338 337 else 339 338 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; ··· 635 634 */ 636 635 637 636 struct nouveau_drm *drm = nouveau_drm(dev); 638 - struct nvif_device *device = &drm->device; 637 + struct nvif_object *device = &drm->device.object; 639 638 struct nvbios *bios = &drm->vbios; 640 639 int cv = bios->chip_version; 641 640 uint16_t clktable = 0, scriptptr; ··· 1915 1914 */ 1916 1915 1917 1916 struct nouveau_drm *drm = nouveau_drm(dev); 1918 - struct nvif_device *device = &drm->device; 1917 + struct nvif_object *device = &drm->device.object; 1919 1918 uint8_t bytes_to_write; 1920 1919 uint16_t hwsq_entry_offset; 1921 1920 int i;
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1064 1064 { 1065 1065 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1066 1066 struct nouveau_channel *chan = drm->ttm.chan; 1067 - struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); 1067 + struct nouveau_cli *cli = (void *)chan->user.client; 1068 1068 struct nouveau_fence *fence; 1069 1069 int ret; 1070 1070 ··· 1137 1137 if (chan == NULL) 1138 1138 continue; 1139 1139 1140 - ret = nvif_object_init(chan->object, NULL, 1140 + ret = nvif_object_init(&chan->user, 1141 1141 mthd->oclass | (mthd->engine << 16), 1142 1142 mthd->oclass, NULL, 0, 1143 1143 &drm->ttm.copy);
+25 -30
drivers/gpu/drm/nouveau/nouveau_chan.c
··· 42 42 int 43 43 nouveau_channel_idle(struct nouveau_channel *chan) 44 44 { 45 - struct nouveau_cli *cli = (void *)nvif_client(chan->object); 45 + struct nouveau_cli *cli = (void *)chan->user.client; 46 46 struct nouveau_fence *fence = NULL; 47 47 int ret; 48 48 ··· 54 54 55 55 if (ret) 56 56 NV_PRINTK(err, cli, "failed to idle channel 0x%08x [%s]\n", 57 - chan->object->handle, nvxx_client(&cli->base)->name); 57 + chan->user.handle, nvxx_client(&cli->base)->name); 58 58 return ret; 59 59 } 60 60 ··· 70 70 nvif_object_fini(&chan->nvsw); 71 71 nvif_object_fini(&chan->gart); 72 72 nvif_object_fini(&chan->vram); 73 - nvif_object_ref(NULL, &chan->object); 73 + nvif_object_fini(&chan->user); 74 74 nvif_object_fini(&chan->push.ctxdma); 75 75 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); 76 76 nouveau_bo_unmap(chan->push.buffer); 77 77 if (chan->push.buffer && chan->push.buffer->pin_refcnt) 78 78 nouveau_bo_unpin(chan->push.buffer); 79 79 nouveau_bo_ref(NULL, &chan->push.buffer); 80 - nvif_device_ref(NULL, &chan->device); 81 80 kfree(chan); 82 81 } 83 82 *pchan = NULL; ··· 86 87 nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, 87 88 u32 handle, u32 size, struct nouveau_channel **pchan) 88 89 { 89 - struct nouveau_cli *cli = (void *)nvif_client(&device->base); 90 + struct nouveau_cli *cli = (void *)device->object.client; 90 91 struct nvkm_mmu *mmu = nvxx_mmu(device); 91 92 struct nv_dma_v0 args = {}; 92 93 struct nouveau_channel *chan; ··· 97 98 if (!chan) 98 99 return -ENOMEM; 99 100 100 - nvif_device_ref(device, &chan->device); 101 + chan->device = device; 101 102 chan->drm = drm; 102 103 103 104 /* allocate memory for dma push buffer */ ··· 168 169 } 169 170 } 170 171 171 - ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH | 172 + ret = nvif_object_init(&device->object, NVDRM_PUSH | 172 173 (handle & 0xffff), NV_DMA_FROM_MEMORY, 173 174 &args, sizeof(args), &chan->push.ctxdma); 174 175 if (ret) { ··· 193 194 union { 194 
195 struct nv50_channel_gpfifo_v0 nv50; 195 196 struct kepler_channel_gpfifo_a_v0 kepler; 196 - } args, *retn; 197 + } args; 197 198 struct nouveau_channel *chan; 198 199 u32 size; 199 200 int ret; ··· 221 222 size = sizeof(args.nv50); 222 223 } 223 224 224 - ret = nvif_object_new(nvif_object(device), handle, *oclass++, 225 - &args, size, &chan->object); 225 + ret = nvif_object_init(&device->object, handle, *oclass++, 226 + &args, size, &chan->user); 226 227 if (ret == 0) { 227 - retn = chan->object->data; 228 - if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A) 229 - chan->chid = retn->kepler.chid; 228 + if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A) 229 + chan->chid = args.kepler.chid; 230 230 else 231 - chan->chid = retn->nv50.chid; 231 + chan->chid = args.nv50.chid; 232 232 return ret; 233 233 } 234 234 } while (*oclass); ··· 246 248 NV03_CHANNEL_DMA, 247 249 0 }; 248 250 const u16 *oclass = oclasses; 249 - struct nv03_channel_dma_v0 args, *retn; 251 + struct nv03_channel_dma_v0 args; 250 252 struct nouveau_channel *chan; 251 253 int ret; 252 254 ··· 262 264 args.offset = chan->push.vma.offset; 263 265 264 266 do { 265 - ret = nvif_object_new(nvif_object(device), handle, *oclass++, 266 - &args, sizeof(args), &chan->object); 267 + ret = nvif_object_init(&device->object, handle, *oclass++, 268 + &args, sizeof(args), &chan->user); 267 269 if (ret == 0) { 268 - retn = chan->object->data; 269 - chan->chid = retn->chid; 270 + chan->chid = args.chid; 270 271 return ret; 271 272 } 272 273 } while (ret && *oclass); ··· 278 281 nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) 279 282 { 280 283 struct nvif_device *device = chan->device; 281 - struct nouveau_cli *cli = (void *)nvif_client(&device->base); 284 + struct nouveau_cli *cli = (void *)chan->user.client; 282 285 struct nvkm_mmu *mmu = nvxx_mmu(device); 283 286 struct nvkm_sw_chan *swch; 284 287 struct nv_dma_v0 args = {}; 285 288 int ret, i; 286 289 287 - nvif_object_map(chan->object); 
290 + nvif_object_map(&chan->user); 288 291 289 292 /* allocate dma objects to cover all allowed vram, and gart */ 290 293 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { ··· 300 303 args.limit = device->info.ram_user - 1; 301 304 } 302 305 303 - ret = nvif_object_init(chan->object, NULL, vram, 304 - NV_DMA_IN_MEMORY, &args, 305 - sizeof(args), &chan->vram); 306 + ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY, 307 + &args, sizeof(args), &chan->vram); 306 308 if (ret) 307 309 return ret; 308 310 ··· 324 328 args.limit = mmu->limit - 1; 325 329 } 326 330 327 - ret = nvif_object_init(chan->object, NULL, gart, 328 - NV_DMA_IN_MEMORY, &args, 329 - sizeof(args), &chan->gart); 331 + ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY, 332 + &args, sizeof(args), &chan->gart); 330 333 if (ret) 331 334 return ret; 332 335 } 333 336 334 337 /* initialise dma tracking parameters */ 335 - switch (chan->object->oclass & 0x00ff) { 338 + switch (chan->user.oclass & 0x00ff) { 336 339 case 0x006b: 337 340 case 0x006e: 338 341 chan->user_put = 0x40; ··· 363 368 364 369 /* allocate software object class (used for fences on <= nv05) */ 365 370 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) { 366 - ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e, 371 + ret = nvif_object_init(&chan->user, 0x006e, 0x006e, 367 372 NULL, 0, &chan->nvsw); 368 373 if (ret) 369 374 return ret; ··· 390 395 u32 handle, u32 arg0, u32 arg1, 391 396 struct nouveau_channel **pchan) 392 397 { 393 - struct nouveau_cli *cli = (void *)nvif_client(&device->base); 398 + struct nouveau_cli *cli = (void *)device->object.client; 394 399 bool super; 395 400 int ret; 396 401
+1 -1
drivers/gpu/drm/nouveau/nouveau_chan.h
··· 37 37 u32 user_get; 38 38 u32 user_put; 39 39 40 - struct nvif_object *object; 40 + struct nvif_object user; 41 41 }; 42 42 43 43
+2 -2
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 1256 1256 break; 1257 1257 } 1258 1258 1259 - ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug, 1260 - true, NV04_DISP_NTFY_CONN, 1259 + ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true, 1260 + NV04_DISP_NTFY_CONN, 1261 1261 &(struct nvif_notify_conn_req_v0) { 1262 1262 .mask = NVIF_NOTIFY_CONN_V0_ANY, 1263 1263 .conn = index,
+3 -3
drivers/gpu/drm/nouveau/nouveau_display.c
··· 185 185 186 186 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 187 187 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 188 - ret = nvif_notify_init(&disp->disp, NULL, 188 + ret = nvif_notify_init(&disp->disp, 189 189 nouveau_display_vblank_handler, false, 190 190 NV04_DISP_NTFY_VBLANK, 191 191 &(struct nvif_notify_head_req_v0) { ··· 494 494 int i; 495 495 496 496 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { 497 - ret = nvif_object_init(nvif_object(&drm->device), NULL, 497 + ret = nvif_object_init(&drm->device.object, 498 498 NVDRM_DISPLAY, oclass[i], 499 499 NULL, 0, &disp->disp); 500 500 } ··· 711 711 chan = drm->channel; 712 712 if (!chan) 713 713 return -ENODEV; 714 - cli = (void *)nvif_client(&chan->device->base); 714 + cli = (void *)chan->user.client; 715 715 716 716 s = kzalloc(sizeof(*s), GFP_KERNEL); 717 717 if (!s)
+5 -5
drivers/gpu/drm/nouveau/nouveau_dma.c
··· 52 52 { 53 53 uint64_t val; 54 54 55 - val = nvif_rd32(chan, chan->user_get); 55 + val = nvif_rd32(&chan->user, chan->user_get); 56 56 if (chan->user_get_hi) 57 - val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32; 57 + val |= (uint64_t)nvif_rd32(&chan->user, chan->user_get_hi) << 32; 58 58 59 59 /* reset counter as long as GET is still advancing, this is 60 60 * to avoid misdetecting a GPU lockup if the GPU happens to ··· 82 82 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, 83 83 int delta, int length) 84 84 { 85 - struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); 85 + struct nouveau_cli *cli = (void *)chan->user.client; 86 86 struct nouveau_bo *pb = chan->push.buffer; 87 87 struct nvkm_vma *vma; 88 88 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; ··· 103 103 /* Flush writes. */ 104 104 nouveau_bo_rd32(pb, 0); 105 105 106 - nvif_wr32(chan, 0x8c, chan->dma.ib_put); 106 + nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put); 107 107 chan->dma.ib_free--; 108 108 } 109 109 ··· 113 113 uint32_t cnt = 0, prev_get = 0; 114 114 115 115 while (chan->dma.ib_free < count) { 116 - uint32_t get = nvif_rd32(chan, 0x88); 116 + uint32_t get = nvif_rd32(&chan->user, 0x88); 117 117 if (get != prev_get) { 118 118 prev_get = get; 119 119 cnt = 0;
+1 -1
drivers/gpu/drm/nouveau/nouveau_dma.h
··· 140 140 #define WRITE_PUT(val) do { \ 141 141 mb(); \ 142 142 nouveau_bo_rd32(chan->push.buffer, 0); \ 143 - nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ 143 + nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ 144 144 } while (0) 145 145 146 146 static inline void
+9 -9
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 114 114 snprintf(cli->name, sizeof(cli->name), "%s", sname); 115 115 cli->dev = dev; 116 116 117 - ret = nvif_client_init(NULL, NULL, cli->name, nouveau_name(dev), 117 + ret = nvif_client_init(NULL, cli->name, nouveau_name(dev), 118 118 nouveau_config, nouveau_debug, 119 119 &cli->base); 120 120 if (ret == 0) { ··· 163 163 /*XXX: this is crap, but the fence/channel stuff is a little 164 164 * backwards in some places. this will be fixed. 165 165 */ 166 - ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass)); 166 + ret = nvif_object_sclass(&device->object, sclass, ARRAY_SIZE(sclass)); 167 167 if (ret < 0) 168 168 return; 169 169 ··· 235 235 return; 236 236 } 237 237 238 - ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW, 238 + ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW, 239 239 nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw); 240 240 if (ret == 0) { 241 241 struct nvkm_sw_chan *swch; ··· 262 262 } 263 263 264 264 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { 265 - ret = nvkm_gpuobj_new(nvxx_object(&drm->device), NULL, 32, 265 + ret = nvkm_gpuobj_new(nvxx_object(&drm->device.object), NULL, 32, 266 266 0, 0, &drm->notify); 267 267 if (ret) { 268 268 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); ··· 270 270 return; 271 271 } 272 272 273 - ret = nvif_object_init(drm->channel->object, NULL, NvNotify0, 273 + ret = nvif_object_init(&drm->channel->user, NvNotify0, 274 274 NV_DMA_IN_MEMORY, 275 275 &(struct nv_dma_v0) { 276 276 .target = NV_DMA_V0_TARGET_VRAM, ··· 392 392 393 393 nouveau_get_hdmi_dev(drm); 394 394 395 - ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE, 396 - NV_DEVICE, 395 + ret = nvif_device_init(&drm->client.base.object, 396 + NVDRM_DEVICE, NV_DEVICE, 397 397 &(struct nv_device_v0) { 398 398 .device = ~0, 399 399 }, sizeof(struct nv_device_v0), ··· 408 408 * better fix is found - assuming there is one... 
409 409 */ 410 410 if (drm->device.info.chipset == 0xc1) 411 - nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000); 411 + nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000); 412 412 413 413 nouveau_vga_init(drm); 414 414 nouveau_agp_init(drm); ··· 736 736 ret = nouveau_do_resume(drm_dev, true); 737 737 drm_kms_helper_poll_enable(drm_dev); 738 738 /* do magic */ 739 - nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); 739 + nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); 740 740 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 741 741 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; 742 742 return ret;
+7 -8
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 169 169 nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) 170 170 { 171 171 struct nouveau_fence_priv *priv = (void*)chan->drm->fence; 172 - struct nouveau_cli *cli = (void *)nvif_client(chan->object); 172 + struct nouveau_cli *cli = (void *)chan->user.client; 173 173 int ret; 174 174 175 175 INIT_LIST_HEAD(&fctx->flip); ··· 188 188 if (!priv->uevent) 189 189 return; 190 190 191 - ret = nvif_notify_init(chan->object, NULL, 192 - nouveau_fence_wait_uevent_handler, false, 193 - G82_CHANNEL_DMA_V0_NTFY_UEVENT, 194 - &(struct nvif_notify_uevent_req) { }, 195 - sizeof(struct nvif_notify_uevent_req), 196 - sizeof(struct nvif_notify_uevent_rep), 197 - &fctx->notify); 191 + ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler, 192 + false, G82_CHANNEL_DMA_V0_NTFY_UEVENT, 193 + &(struct nvif_notify_uevent_req) { }, 194 + sizeof(struct nvif_notify_uevent_req), 195 + sizeof(struct nvif_notify_uevent_rep), 196 + &fctx->notify); 198 197 199 198 WARN_ON(ret); 200 199 }
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 681 681 return -ENOMEM; 682 682 683 683 list_for_each_entry(temp, &abi16->channels, head) { 684 - if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) { 684 + if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) { 685 685 chan = temp->chan; 686 686 break; 687 687 }
+2 -2
drivers/gpu/drm/nouveau/nouveau_sysfs.c
··· 188 188 if (!sysfs) 189 189 return -ENOMEM; 190 190 191 - ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL, 191 + ret = nvif_object_init(&device->object, NVDRM_CONTROL, 192 192 NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0, 193 - &sysfs->ctrl); 193 + &sysfs->ctrl); 194 194 if (ret == 0) 195 195 device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate); 196 196
+5 -4
drivers/gpu/drm/nouveau/nouveau_vga.c
··· 12 12 static unsigned int 13 13 nouveau_vga_set_decode(void *priv, bool state) 14 14 { 15 - struct nvif_device *device = &nouveau_drm(priv)->device; 15 + struct nouveau_drm *drm = nouveau_drm(priv); 16 + struct nvif_object *device = &drm->device.object; 16 17 17 - if (device->info.family == NV_DEVICE_INFO_V0_CURIE && 18 - device->info.chipset >= 0x4c) 18 + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE && 19 + drm->device.info.chipset >= 0x4c) 19 20 nvif_wr32(device, 0x088060, state); 20 21 else 21 - if (device->info.chipset >= 0x40) 22 + if (drm->device.info.chipset >= 0x40) 22 23 nvif_wr32(device, 0x088054, state); 23 24 else 24 25 nvif_wr32(device, 0x001854, state);
+6 -6
drivers/gpu/drm/nouveau/nv04_fbcon.c
··· 171 171 return -EINVAL; 172 172 } 173 173 174 - ret = nvif_object_init(chan->object, NULL, 0x0062, 174 + ret = nvif_object_init(&chan->user, 0x0062, 175 175 device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ? 176 176 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d); 177 177 if (ret) 178 178 return ret; 179 179 180 - ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0, 180 + ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0, 181 181 &nfbdev->clip); 182 182 if (ret) 183 183 return ret; 184 184 185 - ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0, 185 + ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0, 186 186 &nfbdev->rop); 187 187 if (ret) 188 188 return ret; 189 189 190 - ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0, 190 + ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0, 191 191 &nfbdev->patt); 192 192 if (ret) 193 193 return ret; 194 194 195 - ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0, 195 + ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0, 196 196 &nfbdev->gdi); 197 197 if (ret) 198 198 return ret; 199 199 200 - ret = nvif_object_init(chan->object, NULL, 0x005f, 200 + ret = nvif_object_init(&chan->user, 0x005f, 201 201 device->info.chipset >= 0x11 ? 0x009f : 0x005f, 202 202 NULL, 0, &nfbdev->blit); 203 203 if (ret)
+1 -1
drivers/gpu/drm/nouveau/nv04_fence.c
··· 57 57 static u32 58 58 nv04_fence_read(struct nouveau_channel *chan) 59 59 { 60 - struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(chan);; 60 + struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(&chan->user); 61 61 return atomic_read(&fifo->refcnt); 62 62 } 63 63
+1 -1
drivers/gpu/drm/nouveau/nv10_fence.c
··· 50 50 u32 51 51 nv10_fence_read(struct nouveau_channel *chan) 52 52 { 53 - return nvif_rd32(chan, 0x0048); 53 + return nvif_rd32(&chan->user, 0x0048); 54 54 } 55 55 56 56 void
+2 -2
drivers/gpu/drm/nouveau/nv17_fence.c
··· 33 33 nv17_fence_sync(struct nouveau_fence *fence, 34 34 struct nouveau_channel *prev, struct nouveau_channel *chan) 35 35 { 36 - struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base); 36 + struct nouveau_cli *cli = (void *)prev->user.client; 37 37 struct nv10_fence_priv *priv = chan->drm->fence; 38 38 struct nv10_fence_chan *fctx = chan->fence; 39 39 u32 value; ··· 89 89 fctx->base.read = nv10_fence_read; 90 90 fctx->base.sync = nv17_fence_sync; 91 91 92 - ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY, 92 + ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY, 93 93 &(struct nv_dma_v0) { 94 94 .target = NV_DMA_V0_TARGET_VRAM, 95 95 .access = NV_DMA_V0_ACCESS_RDWR,
+59 -51
drivers/gpu/drm/nouveau/nv50_display.c
··· 60 60 61 61 struct nv50_chan { 62 62 struct nvif_object user; 63 + struct nvif_device *device; 63 64 }; 64 65 65 66 static int 66 - nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head, 67 - void *data, u32 size, struct nv50_chan *chan) 67 + nv50_chan_create(struct nvif_device *device, struct nvif_object *disp, 68 + const u32 *oclass, u8 head, void *data, u32 size, 69 + struct nv50_chan *chan) 68 70 { 69 71 const u32 handle = (oclass[0] << 16) | head; 70 72 u32 sclass[8]; 71 73 int ret, i; 74 + 75 + chan->device = device; 72 76 73 77 ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass)); 74 78 WARN_ON(ret > ARRAY_SIZE(sclass)); ··· 82 78 while (oclass[0]) { 83 79 for (i = 0; i < ARRAY_SIZE(sclass); i++) { 84 80 if (sclass[i] == oclass[0]) { 85 - ret = nvif_object_init(disp, NULL, handle, 86 - oclass[0], data, size, 87 - &chan->user); 81 + ret = nvif_object_init(disp, handle, oclass[0], 82 + data, size, &chan->user); 88 83 if (ret == 0) 89 84 nvif_object_map(&chan->user); 90 85 return ret; ··· 116 113 } 117 114 118 115 static int 119 - nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head, 120 - void *data, u32 size, struct nv50_pioc *pioc) 116 + nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp, 117 + const u32 *oclass, u8 head, void *data, u32 size, 118 + struct nv50_pioc *pioc) 121 119 { 122 - return nv50_chan_create(disp, oclass, head, data, size, &pioc->base); 120 + return nv50_chan_create(device, disp, oclass, head, data, size, 121 + &pioc->base); 123 122 } 124 123 125 124 /****************************************************************************** ··· 133 128 }; 134 129 135 130 static int 136 - nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs) 131 + nv50_curs_create(struct nvif_device *device, struct nvif_object *disp, 132 + int head, struct nv50_curs *curs) 137 133 { 138 134 struct nv50_disp_cursor_v0 args = { 139 135 .head = head, ··· 148 142 0 149 143 }; 150 144 151 
- return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), 152 - &curs->base); 145 + return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args), 146 + &curs->base); 153 147 } 154 148 155 149 /****************************************************************************** ··· 161 155 }; 162 156 163 157 static int 164 - nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm) 158 + nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp, 159 + int head, struct nv50_oimm *oimm) 165 160 { 166 161 struct nv50_disp_cursor_v0 args = { 167 162 .head = head, ··· 176 169 0 177 170 }; 178 171 179 - return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), 180 - &oimm->base); 172 + return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args), 173 + &oimm->base); 181 174 } 182 175 183 176 /****************************************************************************** ··· 201 194 static void 202 195 nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp) 203 196 { 197 + struct nvif_device *device = dmac->base.device; 198 + 204 199 nvif_object_fini(&dmac->vram); 205 200 nvif_object_fini(&dmac->sync); 206 201 207 202 nv50_chan_destroy(&dmac->base); 208 203 209 204 if (dmac->ptr) { 210 - struct pci_dev *pdev = nvxx_device(nvif_device(disp))->pdev; 205 + struct pci_dev *pdev = nvxx_device(device)->pdev; 211 206 pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); 212 207 } 213 208 } 214 209 215 210 static int 216 - nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head, 217 - void *data, u32 size, u64 syncbuf, 211 + nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, 212 + const u32 *oclass, u8 head, void *data, u32 size, u64 syncbuf, 218 213 struct nv50_dmac *dmac) 219 214 { 220 - struct nvif_device *device = nvif_device(disp); 221 215 struct nv50_disp_core_channel_dma_v0 *args = data; 222 216 struct nvif_object pushbuf; 223 217 int ret; ··· 230 222 if 
(!dmac->ptr) 231 223 return -ENOMEM; 232 224 233 - ret = nvif_object_init(nvif_object(device), NULL, 234 - args->pushbuf, NV_DMA_FROM_MEMORY, 235 - &(struct nv_dma_v0) { 225 + ret = nvif_object_init(&device->object, args->pushbuf, 226 + NV_DMA_FROM_MEMORY, &(struct nv_dma_v0) { 236 227 .target = NV_DMA_V0_TARGET_PCI_US, 237 228 .access = NV_DMA_V0_ACCESS_RD, 238 229 .start = dmac->handle + 0x0000, ··· 240 233 if (ret) 241 234 return ret; 242 235 243 - ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base); 236 + ret = nv50_chan_create(device, disp, oclass, head, data, size, 237 + &dmac->base); 244 238 nvif_object_fini(&pushbuf); 245 239 if (ret) 246 240 return ret; 247 241 248 - ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000, 249 - NV_DMA_IN_MEMORY, 242 + ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY, 250 243 &(struct nv_dma_v0) { 251 244 .target = NV_DMA_V0_TARGET_VRAM, 252 245 .access = NV_DMA_V0_ACCESS_RDWR, ··· 257 250 if (ret) 258 251 return ret; 259 252 260 - ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001, 261 - NV_DMA_IN_MEMORY, 253 + ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY, 262 254 &(struct nv_dma_v0) { 263 255 .target = NV_DMA_V0_TARGET_VRAM, 264 256 .access = NV_DMA_V0_ACCESS_RDWR, ··· 280 274 }; 281 275 282 276 static int 283 - nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core) 277 + nv50_core_create(struct nvif_device *device, struct nvif_object *disp, 278 + u64 syncbuf, struct nv50_mast *core) 284 279 { 285 280 struct nv50_disp_core_channel_dma_v0 args = { 286 281 .pushbuf = 0xb0007d00, ··· 300 293 0 301 294 }; 302 295 303 - return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf, 304 - &core->base); 296 + return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args), 297 + syncbuf, &core->base); 305 298 } 306 299 307 300 /****************************************************************************** ··· 315 308 }; 
316 309 317 310 static int 318 - nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf, 319 - struct nv50_sync *base) 311 + nv50_base_create(struct nvif_device *device, struct nvif_object *disp, 312 + int head, u64 syncbuf, struct nv50_sync *base) 320 313 { 321 314 struct nv50_disp_base_channel_dma_v0 args = { 322 315 .pushbuf = 0xb0007c00 | head, ··· 333 326 0 334 327 }; 335 328 336 - return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), 329 + return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args), 337 330 syncbuf, &base->base); 338 331 } 339 332 ··· 346 339 }; 347 340 348 341 static int 349 - nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf, 350 - struct nv50_ovly *ovly) 342 + nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp, 343 + int head, u64 syncbuf, struct nv50_ovly *ovly) 351 344 { 352 345 struct nv50_disp_overlay_channel_dma_v0 args = { 353 346 .pushbuf = 0xb0007e00 | head, ··· 363 356 0 364 357 }; 365 358 366 - return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), 359 + return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args), 367 360 syncbuf, &ovly->base); 368 361 } 369 362 ··· 420 413 evo_wait(void *evoc, int nr) 421 414 { 422 415 struct nv50_dmac *dmac = evoc; 423 - struct nvif_device *device = nvif_device(&dmac->base.user); 416 + struct nvif_device *device = dmac->base.device; 424 417 u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4; 425 418 426 419 mutex_lock(&dmac->lock); ··· 580 573 if (unlikely(push == NULL)) 581 574 return -EBUSY; 582 575 583 - if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) { 576 + if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) { 584 577 ret = RING_SPACE(chan, 8); 585 578 if (ret) 586 579 return ret; ··· 594 587 OUT_RING (chan, sync->addr); 595 588 OUT_RING (chan, sync->data); 596 589 } else 597 - if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) { 590 + if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) { 
598 591 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; 599 592 ret = RING_SPACE(chan, 12); 600 593 if (ret) ··· 1425 1418 static int 1426 1419 nv50_crtc_create(struct drm_device *dev, int index) 1427 1420 { 1421 + struct nouveau_drm *drm = nouveau_drm(dev); 1422 + struct nvif_device *device = &drm->device; 1428 1423 struct nv50_disp *disp = nv50_disp(dev); 1429 1424 struct nv50_head *head; 1430 1425 struct drm_crtc *crtc; ··· 1471 1462 goto out; 1472 1463 1473 1464 /* allocate cursor resources */ 1474 - ret = nv50_curs_create(disp->disp, index, &head->curs); 1465 + ret = nv50_curs_create(device, disp->disp, index, &head->curs); 1475 1466 if (ret) 1476 1467 goto out; 1477 1468 1478 1469 /* allocate page flip / sync resources */ 1479 - ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset, 1480 - &head->sync); 1470 + ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset, 1471 + &head->sync); 1481 1472 if (ret) 1482 1473 goto out; 1483 1474 ··· 1485 1476 head->sync.data = 0x00000000; 1486 1477 1487 1478 /* allocate overlay resources */ 1488 - ret = nv50_oimm_create(disp->disp, index, &head->oimm); 1479 + ret = nv50_oimm_create(device, disp->disp, index, &head->oimm); 1489 1480 if (ret) 1490 1481 goto out; 1491 1482 1492 - ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset, 1493 - &head->ovly); 1483 + ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset, 1484 + &head->ovly); 1494 1485 if (ret) 1495 1486 goto out; 1496 1487 ··· 2379 2370 2380 2371 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2381 2372 struct nv50_head *head = nv50_head(crtc); 2382 - int ret = nvif_object_init(&head->sync.base.base.user, NULL, 2383 - name, NV_DMA_IN_MEMORY, &args, size, 2373 + int ret = nvif_object_init(&head->sync.base.base.user, name, 2374 + NV_DMA_IN_MEMORY, &args, size, 2384 2375 &fbdma->base[head->base.index]); 2385 2376 if (ret) { 2386 2377 nv50_fbdma_fini(fbdma); ··· 2388 2379 } 2389 2380 
} 2390 2381 2391 - ret = nvif_object_init(&mast->base.base.user, NULL, name, 2392 - NV_DMA_IN_MEMORY, &args, size, 2393 - &fbdma->core); 2382 + ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY, 2383 + &args, size, &fbdma->core); 2394 2384 if (ret) { 2395 2385 nv50_fbdma_fini(fbdma); 2396 2386 return ret; ··· 2542 2534 goto out; 2543 2535 2544 2536 /* allocate master evo channel */ 2545 - ret = nv50_core_create(disp->disp, disp->sync->bo.offset, 2537 + ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset, 2546 2538 &disp->mast); 2547 2539 if (ret) 2548 2540 goto out; 2549 2541 2550 2542 /* create crtc objects to represent the hw heads */ 2551 2543 if (disp->disp->oclass >= GF110_DISP) 2552 - crtcs = nvif_rd32(device, 0x022448); 2544 + crtcs = nvif_rd32(&device->object, 0x022448); 2553 2545 else 2554 2546 crtcs = 2; 2555 2547
+1 -1
drivers/gpu/drm/nouveau/nv50_fbcon.c
··· 183 183 return -EINVAL; 184 184 } 185 185 186 - ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0, 186 + ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0, 187 187 &nfbdev->twod); 188 188 if (ret) 189 189 return ret;
+2 -2
drivers/gpu/drm/nouveau/nv50_fence.c
··· 51 51 fctx->base.read = nv10_fence_read; 52 52 fctx->base.sync = nv17_fence_sync; 53 53 54 - ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY, 54 + ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY, 55 55 &(struct nv_dma_v0) { 56 56 .target = NV_DMA_V0_TARGET_VRAM, 57 57 .access = NV_DMA_V0_ACCESS_RDWR, ··· 66 66 u32 start = bo->bo.mem.start * PAGE_SIZE; 67 67 u32 limit = start + bo->bo.mem.size - 1; 68 68 69 - ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i, 69 + ret = nvif_object_init(&chan->user, NvEvoSema0 + i, 70 70 NV_DMA_IN_MEMORY, &(struct nv_dma_v0) { 71 71 .target = NV_DMA_V0_TARGET_VRAM, 72 72 .access = NV_DMA_V0_ACCESS_RDWR,
+1 -1
drivers/gpu/drm/nouveau/nv84_fence.c
··· 131 131 int 132 132 nv84_fence_context_new(struct nouveau_channel *chan) 133 133 { 134 - struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); 134 + struct nouveau_cli *cli = (void *)chan->user.client; 135 135 struct nv84_fence_priv *priv = chan->drm->fence; 136 136 struct nv84_fence_chan *fctx; 137 137 int ret, i;
+1 -1
drivers/gpu/drm/nouveau/nvc0_fbcon.c
··· 156 156 struct nouveau_channel *chan = drm->channel; 157 157 int ret, format; 158 158 159 - ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0, 159 + ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0, 160 160 &nfbdev->twod); 161 161 if (ret) 162 162 return ret;
+15 -46
drivers/gpu/drm/nouveau/nvif/client.c
··· 29 29 int 30 30 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) 31 31 { 32 - return client->driver->ioctl(client->base.priv, client->super, data, size, NULL); 32 + return client->driver->ioctl(client->object.priv, client->super, data, size, NULL); 33 33 } 34 34 35 35 int 36 36 nvif_client_suspend(struct nvif_client *client) 37 37 { 38 - return client->driver->suspend(client->base.priv); 38 + return client->driver->suspend(client->object.priv); 39 39 } 40 40 41 41 int 42 42 nvif_client_resume(struct nvif_client *client) 43 43 { 44 - return client->driver->resume(client->base.priv); 44 + return client->driver->resume(client->object.priv); 45 45 } 46 46 47 47 void 48 48 nvif_client_fini(struct nvif_client *client) 49 49 { 50 50 if (client->driver) { 51 - client->driver->fini(client->base.priv); 51 + client->driver->fini(client->object.priv); 52 52 client->driver = NULL; 53 - client->base.parent = NULL; 54 - nvif_object_fini(&client->base); 53 + client->object.parent = NULL; 54 + client->object.client = NULL; 55 + nvif_object_fini(&client->object); 55 56 } 56 57 } 57 58 ··· 69 68 }; 70 69 71 70 int 72 - nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver, 73 - const char *name, u64 device, const char *cfg, const char *dbg, 74 - struct nvif_client *client) 71 + nvif_client_init(const char *driver, const char *name, u64 device, 72 + const char *cfg, const char *dbg, struct nvif_client *client) 75 73 { 76 74 int ret, i; 77 75 78 - ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base); 76 + ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object); 79 77 if (ret) 80 78 return ret; 81 79 82 - client->base.parent = &client->base; 83 - client->base.handle = ~0; 84 - client->object = &client->base; 80 + client->object.client = client; 81 + client->object.parent = &client->object; 82 + client->object.handle = ~0; 83 + client->route = NVIF_IOCTL_V0_ROUTE_NVIF; 85 84 client->super = true; 86 85 87 86 for (i = 0, ret 
= -EINVAL; (client->driver = nvif_drivers[i]); i++) { 88 87 if (!driver || !strcmp(client->driver->name, driver)) { 89 88 ret = client->driver->init(name, device, cfg, dbg, 90 - &client->base.priv); 89 + &client->object.priv); 91 90 if (!ret || driver) 92 91 break; 93 92 } ··· 96 95 if (ret) 97 96 nvif_client_fini(client); 98 97 return ret; 99 - } 100 - 101 - static void 102 - nvif_client_del(struct nvif_client *client) 103 - { 104 - nvif_client_fini(client); 105 - kfree(client); 106 - } 107 - 108 - int 109 - nvif_client_new(const char *driver, const char *name, u64 device, 110 - const char *cfg, const char *dbg, 111 - struct nvif_client **pclient) 112 - { 113 - struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL); 114 - if (client) { 115 - int ret = nvif_client_init(nvif_client_del, driver, name, 116 - device, cfg, dbg, client); 117 - if (ret) { 118 - kfree(client); 119 - client = NULL; 120 - } 121 - *pclient = client; 122 - return ret; 123 - } 124 - return -ENOMEM; 125 - } 126 - 127 - void 128 - nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient) 129 - { 130 - nvif_object_ref(&client->base, (struct nvif_object **)pclient); 131 98 }
+6 -39
drivers/gpu/drm/nouveau/nvif/device.c
··· 33 33 void 34 34 nvif_device_fini(struct nvif_device *device) 35 35 { 36 - nvif_object_fini(&device->base); 36 + nvif_object_fini(&device->object); 37 37 } 38 38 39 39 int 40 - nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *), 41 - u32 handle, u32 oclass, void *data, u32 size, 42 - struct nvif_device *device) 40 + nvif_device_init(struct nvif_object *parent, u32 handle, u32 oclass, 41 + void *data, u32 size, struct nvif_device *device) 43 42 { 44 - int ret = nvif_object_init(parent, (void *)dtor, handle, oclass, 45 - data, size, &device->base); 43 + int ret = nvif_object_init(parent, handle, oclass, data, size, 44 + &device->object); 46 45 if (ret == 0) { 47 - device->object = &device->base; 48 46 device->info.version = 0; 49 - ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO, 47 + ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO, 50 48 &device->info, sizeof(device->info)); 51 49 } 52 50 return ret; 53 - } 54 - 55 - static void 56 - nvif_device_del(struct nvif_device *device) 57 - { 58 - nvif_device_fini(device); 59 - kfree(device); 60 - } 61 - 62 - int 63 - nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass, 64 - void *data, u32 size, struct nvif_device **pdevice) 65 - { 66 - struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL); 67 - if (device) { 68 - int ret = nvif_device_init(parent, nvif_device_del, handle, 69 - oclass, data, size, device); 70 - if (ret) { 71 - kfree(device); 72 - device = NULL; 73 - } 74 - *pdevice = device; 75 - return ret; 76 - } 77 - return -ENOMEM; 78 - } 79 - 80 - void 81 - nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice) 82 - { 83 - nvif_object_ref(&device->base, (struct nvif_object **)pdevice); 84 51 }
+6 -43
drivers/gpu/drm/nouveau/nvif/notify.c
··· 124 124 } 125 125 126 126 if (!WARN_ON(notify == NULL)) { 127 - struct nvif_client *client = nvif_client(notify->object); 127 + struct nvif_client *client = notify->object->client; 128 128 if (!WARN_ON(notify->size != size)) { 129 129 atomic_inc(&notify->putcnt); 130 130 if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) { ··· 156 156 if (ret >= 0 && object) { 157 157 ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); 158 158 if (ret == 0) { 159 - nvif_object_ref(NULL, &notify->object); 159 + notify->object = NULL; 160 160 kfree((void *)notify->data); 161 161 } 162 162 } ··· 164 164 } 165 165 166 166 int 167 - nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *), 168 - int (*func)(struct nvif_notify *), bool work, u8 event, 169 - void *data, u32 size, u32 reply, struct nvif_notify *notify) 167 + nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *), 168 + bool work, u8 event, void *data, u32 size, u32 reply, 169 + struct nvif_notify *notify) 170 170 { 171 171 struct { 172 172 struct nvif_ioctl_v0 ioctl; ··· 175 175 } *args; 176 176 int ret = -ENOMEM; 177 177 178 - notify->object = NULL; 179 - nvif_object_ref(object, &notify->object); 178 + notify->object = object; 180 179 notify->flags = 0; 181 180 atomic_set(&notify->putcnt, 1); 182 - notify->dtor = dtor; 183 181 notify->func = func; 184 182 notify->data = NULL; 185 183 notify->size = reply; ··· 208 210 if (ret) 209 211 nvif_notify_fini(notify); 210 212 return ret; 211 - } 212 - 213 - static void 214 - nvif_notify_del(struct nvif_notify *notify) 215 - { 216 - nvif_notify_fini(notify); 217 - kfree(notify); 218 - } 219 - 220 - void 221 - nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify) 222 - { 223 - BUG_ON(notify != NULL); 224 - if (*pnotify) 225 - (*pnotify)->dtor(*pnotify); 226 - *pnotify = notify; 227 - } 228 - 229 - int 230 - nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *), 231 - bool work, u8 
type, void *data, u32 size, u32 reply, 232 - struct nvif_notify **pnotify) 233 - { 234 - struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL); 235 - if (notify) { 236 - int ret = nvif_notify_init(object, nvif_notify_del, func, work, 237 - type, data, size, reply, notify); 238 - if (ret) { 239 - kfree(notify); 240 - notify = NULL; 241 - } 242 - *pnotify = notify; 243 - return ret; 244 - } 245 - return -ENOMEM; 246 213 }
+40 -93
drivers/gpu/drm/nouveau/nvif/object.c
··· 30 30 int 31 31 nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack) 32 32 { 33 - struct nvif_client *client = nvif_client(object); 33 + struct nvif_client *client = object->client; 34 34 union { 35 35 struct nvif_ioctl_v0 v0; 36 36 } *args = data; ··· 47 47 } else 48 48 return -ENOSYS; 49 49 50 - return client->driver->ioctl(client->base.priv, client->super, data, size, hack); 50 + return client->driver->ioctl(client->object.priv, client->super, 51 + data, size, hack); 51 52 } 52 53 53 54 int ··· 146 145 nvif_object_unmap(struct nvif_object *object) 147 146 { 148 147 if (object->map.size) { 149 - struct nvif_client *client = nvif_client(object); 148 + struct nvif_client *client = object->client; 150 149 struct { 151 150 struct nvif_ioctl_v0 ioctl; 152 151 struct nvif_ioctl_unmap unmap; ··· 168 167 int 169 168 nvif_object_map(struct nvif_object *object) 170 169 { 171 - struct nvif_client *client = nvif_client(object); 170 + struct nvif_client *client = object->client; 172 171 struct { 173 172 struct nvif_ioctl_v0 ioctl; 174 173 struct nvif_ioctl_map_v0 map; ··· 187 186 return ret; 188 187 } 189 188 190 - struct ctor { 191 - struct nvif_ioctl_v0 ioctl; 192 - struct nvif_ioctl_new_v0 new; 193 - }; 194 - 195 189 void 196 190 nvif_object_fini(struct nvif_object *object) 197 191 { 198 - struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data); 199 - if (object->parent) { 200 - struct { 201 - struct nvif_ioctl_v0 ioctl; 202 - struct nvif_ioctl_del del; 203 - } args = { 204 - .ioctl.type = NVIF_IOCTL_V0_DEL, 205 - }; 192 + struct { 193 + struct nvif_ioctl_v0 ioctl; 194 + struct nvif_ioctl_del del; 195 + } args = { 196 + .ioctl.type = NVIF_IOCTL_V0_DEL, 197 + }; 206 198 207 - nvif_object_unmap(object); 208 - nvif_object_ioctl(object, &args, sizeof(args), NULL); 209 - if (object->data) { 210 - object->size = 0; 211 - object->data = NULL; 212 - kfree(ctor); 213 - } 214 - nvif_object_ref(NULL, &object->parent); 215 - } 199 + if 
(!object->client) 200 + return; 201 + 202 + nvif_object_unmap(object); 203 + nvif_object_ioctl(object, &args, sizeof(args), NULL); 204 + object->client = NULL; 216 205 } 217 206 218 207 int 219 - nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *), 220 - u32 handle, u32 oclass, void *data, u32 size, 221 - struct nvif_object *object) 208 + nvif_object_init(struct nvif_object *parent, u32 handle, u32 oclass, 209 + void *data, u32 size, struct nvif_object *object) 222 210 { 223 - struct ctor *ctor; 211 + struct { 212 + struct nvif_ioctl_v0 ioctl; 213 + struct nvif_ioctl_new_v0 new; 214 + } *args; 224 215 int ret = 0; 225 216 226 - object->parent = NULL; 227 - object->object = object; 228 - nvif_object_ref(parent, &object->parent); 229 - kref_init(&object->refcount); 217 + object->client = NULL; 218 + object->parent = parent; 230 219 object->handle = handle; 231 220 object->oclass = oclass; 232 - object->data = NULL; 233 - object->size = 0; 234 - object->dtor = dtor; 235 221 object->map.ptr = NULL; 236 222 object->map.size = 0; 237 223 238 224 if (object->parent) { 239 - if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) { 225 + if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) { 240 226 nvif_object_fini(object); 241 227 return -ENOMEM; 242 228 } 243 - object->data = ctor->new.data; 244 - object->size = size; 245 - memcpy(object->data, data, size); 246 229 247 - ctor->ioctl.version = 0; 248 - ctor->ioctl.type = NVIF_IOCTL_V0_NEW; 249 - ctor->new.version = 0; 250 - ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF; 251 - ctor->new.token = (unsigned long)(void *)object; 252 - ctor->new.handle = handle; 253 - ctor->new.oclass = oclass; 230 + args->ioctl.version = 0; 231 + args->ioctl.type = NVIF_IOCTL_V0_NEW; 232 + args->new.version = 0; 233 + args->new.route = parent->client->route; 234 + args->new.token = (unsigned long)(void *)object; 235 + args->new.handle = handle; 236 + args->new.oclass = oclass; 254 237 255 - ret = 
nvif_object_ioctl(parent, ctor, sizeof(*ctor) + 256 - object->size, &object->priv); 238 + memcpy(args->new.data, data, size); 239 + ret = nvif_object_ioctl(parent, args, sizeof(*args) + size, 240 + &object->priv); 241 + memcpy(data, args->new.data, size); 242 + kfree(args); 243 + if (ret == 0) 244 + object->client = parent->client; 257 245 } 258 246 259 247 if (ret) 260 248 nvif_object_fini(object); 261 249 return ret; 262 - } 263 - 264 - static void 265 - nvif_object_del(struct nvif_object *object) 266 - { 267 - nvif_object_fini(object); 268 - kfree(object); 269 - } 270 - 271 - int 272 - nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass, 273 - void *data, u32 size, struct nvif_object **pobject) 274 - { 275 - struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL); 276 - if (object) { 277 - int ret = nvif_object_init(parent, nvif_object_del, handle, 278 - oclass, data, size, object); 279 - if (ret) { 280 - kfree(object); 281 - object = NULL; 282 - } 283 - *pobject = object; 284 - return ret; 285 - } 286 - return -ENOMEM; 287 - } 288 - 289 - static void 290 - nvif_object_put(struct kref *kref) 291 - { 292 - struct nvif_object *object = 293 - container_of(kref, typeof(*object), refcount); 294 - object->dtor(object); 295 - } 296 - 297 - void 298 - nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject) 299 - { 300 - if (object) 301 - kref_get(&object->refcount); 302 - if (*pobject) 303 - kref_put(&(*pobject)->refcount, nvif_object_put); 304 - *pobject = object; 305 250 }