drivers/gpu/drm/i915/i915_drv.c (+2 -2)
···
 	s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

 	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
-	s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

 	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
 	s->ecochk = I915_READ(GAM_ECOCHK);
···
 	I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

 	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
-	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
+	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

 	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
 	I915_WRITE(GAM_ECOCHK, s->ecochk);
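These two hunks fix a copy-paste slip in the S0ix state save/restore pair: gfx_max_req_count was read from, and written back to, GEN7_MEDIA_MAX_REQ_COUNT, so the GFX register was never actually preserved across suspend. As a rough illustration of how a table-driven save/restore avoids this class of bug (a sketch only, with hypothetical register offsets and a fake MMIO backing store, not the i915 implementation):

#include <stddef.h>
#include <stdint.h>

#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070 /* hypothetical offset */
#define GEN7_GFX_MAX_REQ_COUNT   0x4074 /* hypothetical offset */

struct s0ix_state {
	uint32_t media_max_req_count;
	uint32_t gfx_max_req_count;
};

/* One table drives both directions, so a pasted register name
 * cannot diverge between the save side and the restore side. */
static const struct { uint32_t reg; size_t off; } s0ix_regs[] = {
	{ GEN7_MEDIA_MAX_REQ_COUNT, offsetof(struct s0ix_state, media_max_req_count) },
	{ GEN7_GFX_MAX_REQ_COUNT,   offsetof(struct s0ix_state, gfx_max_req_count)   },
};

static uint32_t fake_mmio[0x8000];               /* stand-in for I915_READ/WRITE */
static uint32_t mmio_read(uint32_t reg)          { return fake_mmio[reg / 4]; }
static void mmio_write(uint32_t reg, uint32_t v) { fake_mmio[reg / 4] = v; }

static void s0ix_save(struct s0ix_state *s)
{
	for (size_t i = 0; i < sizeof(s0ix_regs) / sizeof(s0ix_regs[0]); i++)
		*(uint32_t *)((char *)s + s0ix_regs[i].off) =
			mmio_read(s0ix_regs[i].reg);
}

static void s0ix_restore(const struct s0ix_state *s)
{
	for (size_t i = 0; i < sizeof(s0ix_regs) / sizeof(s0ix_regs[0]); i++)
		mmio_write(s0ix_regs[i].reg,
			   *(const uint32_t *)((const char *)s + s0ix_regs[i].off));
}

int main(void)
{
	struct s0ix_state s;

	s0ix_save(&s);    /* suspend path */
	s0ix_restore(&s); /* resume path */
	return 0;
}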
drivers/gpu/drm/i915/i915_gem.c (+2 -1)
···
 		ret = ring->add_request(ring);
 		if (ret)
 			return ret;
+
+		request->tail = intel_ring_get_tail(ringbuf);
 	}

 	request->head = request_start;
-	request->tail = intel_ring_get_tail(ringbuf);

 	/* Whilst this request exists, batch_obj will be on the
 	 * active_list, and so will hold the active reference. Only when this
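This i915_gem.c change supports the lite-restore workaround in the intel_lrc.c hunks below: once gen8_emit_request() emits two padding NOOPs after the request is submitted, sampling the ring tail in the common path would bake that padding into request->tail. Moving the capture to immediately after add_request() keeps the legacy path's behavior unchanged while leaving the execlists path's tail as recorded at submission, ahead of the padding. A toy illustration of the off-by-eight this avoids (plain arithmetic, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t ring_tail = 400;        /* bytes into the ring */
	uint32_t tail_at_submit, tail_after_padding;

	ring_tail += 6 * 4;              /* final request commands */
	tail_at_submit = ring_tail;      /* sampled right after submission */
	ring_tail += 2 * 4;              /* WaIdleLiteRestore padding NOOPs */
	tail_after_padding = ring_tail;  /* what a late sample would record */

	/* A late sample includes the padding in request->tail, so the
	 * padding could never be left unexecuted on first submission. */
	assert(tail_after_padding == tail_at_submit + 8);
	return 0;
}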
drivers/gpu/drm/i915/i915_reg.h (+1)
···
 #define   GMBUS_CYCLE_INDEX	(2<<25)
 #define   GMBUS_CYCLE_STOP	(4<<25)
 #define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_BYTE_COUNT_MAX   256U
 #define   GMBUS_SLAVE_INDEX_SHIFT 8
 #define   GMBUS_SLAVE_ADDR_SHIFT 1
 #define   GMBUS_SLAVE_READ	(1<<0)
drivers/gpu/drm/i915/intel_i2c.c (+56 -10)
···
 }

 static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
-		u32 gmbus1_index)
+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+		      unsigned short addr, u8 *buf, unsigned int len,
+		      u32 gmbus1_index)
 {
 	int reg_offset = dev_priv->gpio_mmio_base;
-	u16 len = msg->len;
-	u8 *buf = msg->buf;

 	I915_WRITE(GMBUS1 + reg_offset,
 		   gmbus1_index |
 		   GMBUS_CYCLE_WAIT |
 		   (len << GMBUS_BYTE_COUNT_SHIFT) |
-		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
 		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
···
 }

 static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+		u32 gmbus1_index)
+{
+	u8 *buf = msg->buf;
+	unsigned int rx_size = msg->len;
+	unsigned int len;
+	int ret;
+
+	do {
+		len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+
+		ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
+					    buf, len, gmbus1_index);
+		if (ret)
+			return ret;
+
+		rx_size -= len;
+		buf += len;
+	} while (rx_size != 0);
+
+	return 0;
+}
+
+static int
+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+		       unsigned short addr, u8 *buf, unsigned int len)
 {
 	int reg_offset = dev_priv->gpio_mmio_base;
-	u16 len = msg->len;
-	u8 *buf = msg->buf;
+	unsigned int chunk_size = len;
 	u32 val, loop;

 	val = loop = 0;
···
 	I915_WRITE(GMBUS3 + reg_offset, val);
 	I915_WRITE(GMBUS1 + reg_offset,
 		   GMBUS_CYCLE_WAIT |
-		   (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
-		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		   (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
 		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
···
 		if (ret)
 			return ret;
 	}
+
+	return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+	u8 *buf = msg->buf;
+	unsigned int tx_size = msg->len;
+	unsigned int len;
+	int ret;
+
+	do {
+		len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+
+		ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+		if (ret)
+			return ret;
+
+		buf += len;
+		tx_size -= len;
+	} while (tx_size != 0);
+
 	return 0;
 }

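Together with GMBUS_BYTE_COUNT_MAX above, this refactor splits each i2c_msg into transactions of at most 256 bytes. Previously, a msg->len larger than the byte-count field could hold would, shifted by GMBUS_BYTE_COUNT_SHIFT (16), spill past the field into the GMBUS_CYCLE_* bits of GMBUS1. The U suffix on the new constant keeps the kernel's type-checked min() happy against the unsigned int rx_size/tx_size, and the do/while shape means even a zero-length message still issues exactly one transaction, which matches how zero-length writes are used to probe the bus. A standalone demonstration of the chunk accounting (plain C, not driver code):

#include <stdio.h>

#define GMBUS_BYTE_COUNT_MAX 256U

static unsigned int num_chunks(unsigned int size)
{
	unsigned int len, chunks = 0;

	do {
		len = size < GMBUS_BYTE_COUNT_MAX ? size : GMBUS_BYTE_COUNT_MAX;
		size -= len;
		chunks++;
	} while (size != 0);

	return chunks;
}

int main(void)
{
	/* 0 -> 1 (a zero-length probe still issues one transaction),
	 * 256 -> 1, 257 -> 2, 600 -> 3 (256 + 256 + 88). */
	printf("%u %u %u %u\n",
	       num_chunks(0), num_chunks(256), num_chunks(257), num_chunks(600));
	return 0;
}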
drivers/gpu/drm/i915/intel_lrc.c (+34 -1)
···
 		}
 	}

+	if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+		/*
+		 * WaIdleLiteRestore: make sure we never cause a lite
+		 * restore with HEAD==TAIL
+		 */
+		if (req0 && req0->elsp_submitted) {
+			/*
+			 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
+			 * as we resubmit the request. See gen8_emit_request()
+			 * for where we prepare the padding after the end of the
+			 * request.
+			 */
+			struct intel_ringbuffer *ringbuf;
+
+			ringbuf = req0->ctx->engine[ring->id].ringbuf;
+			req0->tail += 8;
+			req0->tail &= ringbuf->size - 1;
+		}
+	}
+
 	WARN_ON(req1 && req1->elsp_submitted);

 	execlists_submit_contexts(ring, req0->ctx, req0->tail,
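The bump is exactly 8 bytes because the padding prepared by gen8_emit_request() is two MI_NOOP dwords of 4 bytes each, and the wrap uses a mask rather than a modulo because ring sizes are powers of two. A minimal sketch of the arithmetic (not driver code):

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 4096u /* power of two, so & works as modulo */

int main(void)
{
	uint32_t head = 128, tail = 128; /* drained ring: HEAD caught TAIL */

	/* Resubmitting with an unchanged tail would present HEAD == TAIL
	 * to the hardware. Consuming the two pre-emitted MI_NOOPs
	 * (2 dwords = 8 bytes) keeps the lite restore away from that. */
	tail = (tail + 8) & (RING_SIZE - 1);
	assert(head != tail);

	/* The mask also handles wraparound at the end of the ring. */
	assert(((RING_SIZE - 4 + 8) & (RING_SIZE - 1)) == 4);
	return 0;
}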
···
 	u32 cmd;
 	int ret;

-	ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
+	/*
+	 * Reserve space for 2 NOOPs at the end of each request to be
+	 * used as a workaround for not being allowed to do lite
+	 * restore with HEAD==TAIL (WaIdleLiteRestore).
+	 */
+	ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
 	if (ret)
 		return ret;

···
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+
+	/*
+	 * Here we add two extra NOOPs as padding to avoid
+	 * lite restore of a context with HEAD==TAIL.
+	 */
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);

 	return 0;
 }
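Note the ordering here: intel_logical_ring_begin() now reserves 8 dwords instead of 6 to make room for the padding, but the two NOOPs are emitted only after advance_and_submit(), so they sit beyond the TAIL the hardware is given and run only if the unqueue path above bumps the tail across them on resubmission. A toy emitter showing that layout (hypothetical structures, not the driver's API):

#include <assert.h>
#include <stdint.h>

#define MI_NOOP 0u
#define RING_DWORDS 1024u /* power of two */

struct ring {
	uint32_t buf[RING_DWORDS];
	uint32_t tail;      /* write pointer, in dwords */
	uint32_t submitted; /* tail advertised to the "hardware" */
};

static void emit(struct ring *r, uint32_t dw)
{
	r->buf[r->tail] = dw;
	r->tail = (r->tail + 1) & (RING_DWORDS - 1);
}

int main(void)
{
	struct ring r = { .tail = 0 };

	/* ...request payload would be emitted here... */
	emit(&r, MI_NOOP);    /* stand-in for the real tail commands */
	r.submitted = r.tail; /* advance_and_submit: TAIL stops here */

	emit(&r, MI_NOOP);    /* WaIdleLiteRestore padding, written but */
	emit(&r, MI_NOOP);    /* not yet visible past the submitted TAIL */

	assert(r.tail == ((r.submitted + 2) & (RING_DWORDS - 1)));
	return 0;
}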