drm/i915: Implement batch and ring buffer dumping

We create a debugfs node (i915_ringbuffer_data) to expose a hex dump
of the ring buffer itself. We also expose another debugfs node
(i915_ringbuffer_info) with information on the state (i.e. head, tail
addresses) of the ringbuffer.

For batchbuffer dumping, we look at the device's active_list, dumping
each object which has I915_GEM_DOMAIN_COMMAND in its read
domains. This is all exposed through the dri/i915_batchbuffers debugfs
file with a header for each object (giving the object's gtt_offset so
that it can be matched against the offset given in the
BATCH_BUFFER_START command).

Signed-off-by: Ben Gamari <bgamari@gmail.com>
Signed-off-by: Carl Worth <cworth@cworth.org>
Signed-off-by: Eric Anholt <eric@anholt.net>

Authored by Ben Gamari and committed by Eric Anholt
6911a9b8 8fe74cf0

+98 -5
+2
drivers/gpu/drm/i915/i915_drv.h
··· 635 635 void i915_gem_detach_phys_object(struct drm_device *dev, 636 636 struct drm_gem_object *obj); 637 637 void i915_gem_free_all_phys_object(struct drm_device *dev); 638 + int i915_gem_object_get_pages(struct drm_gem_object *obj); 639 + void i915_gem_object_put_pages(struct drm_gem_object *obj); 638 640 639 641 /* i915_gem_tiling.c */ 640 642 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+3 -5
drivers/gpu/drm/i915/i915_gem.c
··· 43 43 uint64_t offset, 44 44 uint64_t size); 45 45 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 46 - static int i915_gem_object_get_pages(struct drm_gem_object *obj); 47 - static void i915_gem_object_put_pages(struct drm_gem_object *obj); 48 46 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 49 47 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 50 48 unsigned alignment); ··· 1283 1285 return 0; 1284 1286 } 1285 1287 1286 - static void 1288 + void 1287 1289 i915_gem_object_put_pages(struct drm_gem_object *obj) 1288 1290 { 1289 1291 struct drm_i915_gem_object *obj_priv = obj->driver_private; ··· 1882 1884 return ret; 1883 1885 } 1884 1886 1885 - static int 1887 + int 1886 1888 i915_gem_object_get_pages(struct drm_gem_object *obj) 1887 1889 { 1888 1890 struct drm_i915_gem_object *obj_priv = obj->driver_private; ··· 3241 3243 exec_offset = exec_list[args->buffer_count - 1].offset; 3242 3244 3243 3245 #if WATCH_EXEC 3244 - i915_gem_dump_object(object_list[args->buffer_count - 1], 3246 + i915_gem_dump_object(batch_obj, 3245 3247 args->batch_len, 3246 3248 __func__, 3247 3249 ~0);
+93
drivers/gpu/drm/i915/i915_gem_debugfs.c
··· 234 234 return 0; 235 235 } 236 236 237 + static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count) 238 + { 239 + int page, i; 240 + uint32_t *mem; 241 + 242 + for (page = 0; page < page_count; page++) { 243 + mem = kmap(pages[page]); 244 + for (i = 0; i < PAGE_SIZE; i += 4) 245 + seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 246 + kunmap(pages[page]); 247 + } 248 + } 249 + 250 + static int i915_batchbuffer_info(struct seq_file *m, void *data) 251 + { 252 + struct drm_info_node *node = (struct drm_info_node *) m->private; 253 + struct drm_device *dev = node->minor->dev; 254 + drm_i915_private_t *dev_priv = dev->dev_private; 255 + struct drm_gem_object *obj; 256 + struct drm_i915_gem_object *obj_priv; 257 + int ret; 258 + 259 + spin_lock(&dev_priv->mm.active_list_lock); 260 + 261 + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 262 + obj = obj_priv->obj; 263 + if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { 264 + ret = i915_gem_object_get_pages(obj); 265 + if (ret) { 266 + DRM_ERROR("Failed to get pages: %d\n", ret); 267 + spin_unlock(&dev_priv->mm.active_list_lock); 268 + return ret; 269 + } 270 + 271 + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset); 272 + i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE); 273 + 274 + i915_gem_object_put_pages(obj); 275 + } 276 + } 277 + 278 + spin_unlock(&dev_priv->mm.active_list_lock); 279 + 280 + return 0; 281 + } 282 + 283 + static int i915_ringbuffer_data(struct seq_file *m, void *data) 284 + { 285 + struct drm_info_node *node = (struct drm_info_node *) m->private; 286 + struct drm_device *dev = node->minor->dev; 287 + drm_i915_private_t *dev_priv = dev->dev_private; 288 + u8 *virt; 289 + uint32_t *ptr, off; 290 + 291 + if (!dev_priv->ring.ring_obj) { 292 + seq_printf(m, "No ringbuffer setup\n"); 293 + return 0; 294 + } 295 + 296 + virt = dev_priv->ring.virtual_start; 297 + 298 + for (off = 0; off < dev_priv->ring.Size; off += 4) { 299 + ptr = (uint32_t *)(virt + off);
300 + seq_printf(m, "%08x : %08x\n", off, *ptr); 301 + } 302 + 303 + return 0; 304 + } 305 + 306 + static int i915_ringbuffer_info(struct seq_file *m, void *data) 307 + { 308 + struct drm_info_node *node = (struct drm_info_node *) m->private; 309 + struct drm_device *dev = node->minor->dev; 310 + drm_i915_private_t *dev_priv = dev->dev_private; 311 + unsigned int head, tail, mask; 312 + 313 + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 314 + tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 315 + mask = dev_priv->ring.tail_mask; 316 + 317 + seq_printf(m, "RingHead : %08x\n", head); 318 + seq_printf(m, "RingTail : %08x\n", tail); 319 + seq_printf(m, "RingMask : %08x\n", mask); 320 + seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); 321 + seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); 322 + 323 + return 0; 324 + } 325 + 326 + 237 327 static struct drm_info_list i915_gem_debugfs_list[] = { 238 328 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 239 329 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, ··· 333 243 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 334 244 {"i915_gem_interrupt", i915_interrupt_info, 0}, 335 245 {"i915_gem_hws", i915_hws_info, 0}, 246 + {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, 247 + {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 248 + {"i915_batchbuffers", i915_batchbuffer_info, 0}, 336 249 }; 337 250 #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) 338 251