Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.4-rc2 1799 lines 46 kB view raw
1/* 2 * Copyright (C) 2008 3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> 4 * 5 * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 12#include <linux/dma-mapping.h> 13#include <linux/init.h> 14#include <linux/platform_device.h> 15#include <linux/err.h> 16#include <linux/spinlock.h> 17#include <linux/delay.h> 18#include <linux/list.h> 19#include <linux/clk.h> 20#include <linux/vmalloc.h> 21#include <linux/string.h> 22#include <linux/interrupt.h> 23#include <linux/io.h> 24#include <linux/module.h> 25 26#include <mach/ipu.h> 27 28#include "../dmaengine.h" 29#include "ipu_intern.h" 30 31#define FS_VF_IN_VALID 0x00000002 32#define FS_ENC_IN_VALID 0x00000001 33 34static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, 35 bool wait_for_stop); 36 37/* 38 * There can be only one, we could allocate it dynamically, but then we'd have 39 * to add an extra parameter to some functions, and use something as ugly as 40 * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device)); 41 * in the ISR 42 */ 43static struct ipu ipu_data; 44 45#define to_ipu(id) container_of(id, struct ipu, idmac) 46 47static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg) 48{ 49 return __raw_readl(ipu->reg_ic + reg); 50} 51 52#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF) 53 54static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg) 55{ 56 __raw_writel(value, ipu->reg_ic + reg); 57} 58 59#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF) 60 61static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg) 62{ 63 return __raw_readl(ipu->reg_ipu + reg); 64} 65 66static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg) 67{ 68 
__raw_writel(value, ipu->reg_ipu + reg); 69} 70 71/***************************************************************************** 72 * IPU / IC common functions 73 */ 74static void dump_idmac_reg(struct ipu *ipu) 75{ 76 dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, " 77 "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n", 78 idmac_read_icreg(ipu, IDMAC_CONF), 79 idmac_read_icreg(ipu, IC_CONF), 80 idmac_read_icreg(ipu, IDMAC_CHA_EN), 81 idmac_read_icreg(ipu, IDMAC_CHA_PRI), 82 idmac_read_icreg(ipu, IDMAC_CHA_BUSY)); 83 dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, " 84 "DB_MODE 0x%x, TASKS_STAT 0x%x\n", 85 idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY), 86 idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY), 87 idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF), 88 idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL), 89 idmac_read_ipureg(ipu, IPU_TASKS_STAT)); 90} 91 92static uint32_t bytes_per_pixel(enum pixel_fmt fmt) 93{ 94 switch (fmt) { 95 case IPU_PIX_FMT_GENERIC: /* generic data */ 96 case IPU_PIX_FMT_RGB332: 97 case IPU_PIX_FMT_YUV420P: 98 case IPU_PIX_FMT_YUV422P: 99 default: 100 return 1; 101 case IPU_PIX_FMT_RGB565: 102 case IPU_PIX_FMT_YUYV: 103 case IPU_PIX_FMT_UYVY: 104 return 2; 105 case IPU_PIX_FMT_BGR24: 106 case IPU_PIX_FMT_RGB24: 107 return 3; 108 case IPU_PIX_FMT_GENERIC_32: /* generic data */ 109 case IPU_PIX_FMT_BGR32: 110 case IPU_PIX_FMT_RGB32: 111 case IPU_PIX_FMT_ABGR32: 112 return 4; 113 } 114} 115 116/* Enable direct write to memory by the Camera Sensor Interface */ 117static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) 118{ 119 uint32_t ic_conf, mask; 120 121 switch (channel) { 122 case IDMAC_IC_0: 123 mask = IC_CONF_PRPENC_EN; 124 break; 125 case IDMAC_IC_7: 126 mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; 127 break; 128 default: 129 return; 130 } 131 ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask; 132 idmac_write_icreg(ipu, ic_conf, IC_CONF); 133} 134 135/* Called under spin_lock_irqsave(&ipu_data.lock) */ 136static 
void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) 137{ 138 uint32_t ic_conf, mask; 139 140 switch (channel) { 141 case IDMAC_IC_0: 142 mask = IC_CONF_PRPENC_EN; 143 break; 144 case IDMAC_IC_7: 145 mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; 146 break; 147 default: 148 return; 149 } 150 ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask; 151 idmac_write_icreg(ipu, ic_conf, IC_CONF); 152} 153 154static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel) 155{ 156 uint32_t stat = TASK_STAT_IDLE; 157 uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT); 158 159 switch (channel) { 160 case IDMAC_IC_7: 161 stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >> 162 TSTAT_CSI2MEM_OFFSET; 163 break; 164 case IDMAC_IC_0: 165 case IDMAC_SDC_0: 166 case IDMAC_SDC_1: 167 default: 168 break; 169 } 170 return stat; 171} 172 173struct chan_param_mem_planar { 174 /* Word 0 */ 175 u32 xv:10; 176 u32 yv:10; 177 u32 xb:12; 178 179 u32 yb:12; 180 u32 res1:2; 181 u32 nsb:1; 182 u32 lnpb:6; 183 u32 ubo_l:11; 184 185 u32 ubo_h:15; 186 u32 vbo_l:17; 187 188 u32 vbo_h:9; 189 u32 res2:3; 190 u32 fw:12; 191 u32 fh_l:8; 192 193 u32 fh_h:4; 194 u32 res3:28; 195 196 /* Word 1 */ 197 u32 eba0; 198 199 u32 eba1; 200 201 u32 bpp:3; 202 u32 sl:14; 203 u32 pfs:3; 204 u32 bam:3; 205 u32 res4:2; 206 u32 npb:6; 207 u32 res5:1; 208 209 u32 sat:2; 210 u32 res6:30; 211} __attribute__ ((packed)); 212 213struct chan_param_mem_interleaved { 214 /* Word 0 */ 215 u32 xv:10; 216 u32 yv:10; 217 u32 xb:12; 218 219 u32 yb:12; 220 u32 sce:1; 221 u32 res1:1; 222 u32 nsb:1; 223 u32 lnpb:6; 224 u32 sx:10; 225 u32 sy_l:1; 226 227 u32 sy_h:9; 228 u32 ns:10; 229 u32 sm:10; 230 u32 sdx_l:3; 231 232 u32 sdx_h:2; 233 u32 sdy:5; 234 u32 sdrx:1; 235 u32 sdry:1; 236 u32 sdr1:1; 237 u32 res2:2; 238 u32 fw:12; 239 u32 fh_l:8; 240 241 u32 fh_h:4; 242 u32 res3:28; 243 244 /* Word 1 */ 245 u32 eba0; 246 247 u32 eba1; 248 249 u32 bpp:3; 250 u32 sl:14; 251 u32 pfs:3; 252 u32 bam:3; 253 u32 res4:2; 
254 u32 npb:6; 255 u32 res5:1; 256 257 u32 sat:2; 258 u32 scc:1; 259 u32 ofs0:5; 260 u32 ofs1:5; 261 u32 ofs2:5; 262 u32 ofs3:5; 263 u32 wid0:3; 264 u32 wid1:3; 265 u32 wid2:3; 266 267 u32 wid3:3; 268 u32 dec_sel:1; 269 u32 res6:28; 270} __attribute__ ((packed)); 271 272union chan_param_mem { 273 struct chan_param_mem_planar pp; 274 struct chan_param_mem_interleaved ip; 275}; 276 277static void ipu_ch_param_set_plane_offset(union chan_param_mem *params, 278 u32 u_offset, u32 v_offset) 279{ 280 params->pp.ubo_l = u_offset & 0x7ff; 281 params->pp.ubo_h = u_offset >> 11; 282 params->pp.vbo_l = v_offset & 0x1ffff; 283 params->pp.vbo_h = v_offset >> 17; 284} 285 286static void ipu_ch_param_set_size(union chan_param_mem *params, 287 uint32_t pixel_fmt, uint16_t width, 288 uint16_t height, uint16_t stride) 289{ 290 u32 u_offset; 291 u32 v_offset; 292 293 params->pp.fw = width - 1; 294 params->pp.fh_l = height - 1; 295 params->pp.fh_h = (height - 1) >> 8; 296 params->pp.sl = stride - 1; 297 298 switch (pixel_fmt) { 299 case IPU_PIX_FMT_GENERIC: 300 /*Represents 8-bit Generic data */ 301 params->pp.bpp = 3; 302 params->pp.pfs = 7; 303 params->pp.npb = 31; 304 params->pp.sat = 2; /* SAT = use 32-bit access */ 305 break; 306 case IPU_PIX_FMT_GENERIC_32: 307 /*Represents 32-bit Generic data */ 308 params->pp.bpp = 0; 309 params->pp.pfs = 7; 310 params->pp.npb = 7; 311 params->pp.sat = 2; /* SAT = use 32-bit access */ 312 break; 313 case IPU_PIX_FMT_RGB565: 314 params->ip.bpp = 2; 315 params->ip.pfs = 4; 316 params->ip.npb = 15; 317 params->ip.sat = 2; /* SAT = 32-bit access */ 318 params->ip.ofs0 = 0; /* Red bit offset */ 319 params->ip.ofs1 = 5; /* Green bit offset */ 320 params->ip.ofs2 = 11; /* Blue bit offset */ 321 params->ip.ofs3 = 16; /* Alpha bit offset */ 322 params->ip.wid0 = 4; /* Red bit width - 1 */ 323 params->ip.wid1 = 5; /* Green bit width - 1 */ 324 params->ip.wid2 = 4; /* Blue bit width - 1 */ 325 break; 326 case IPU_PIX_FMT_BGR24: 327 params->ip.bpp = 1; /* 
24 BPP & RGB PFS */ 328 params->ip.pfs = 4; 329 params->ip.npb = 7; 330 params->ip.sat = 2; /* SAT = 32-bit access */ 331 params->ip.ofs0 = 0; /* Red bit offset */ 332 params->ip.ofs1 = 8; /* Green bit offset */ 333 params->ip.ofs2 = 16; /* Blue bit offset */ 334 params->ip.ofs3 = 24; /* Alpha bit offset */ 335 params->ip.wid0 = 7; /* Red bit width - 1 */ 336 params->ip.wid1 = 7; /* Green bit width - 1 */ 337 params->ip.wid2 = 7; /* Blue bit width - 1 */ 338 break; 339 case IPU_PIX_FMT_RGB24: 340 params->ip.bpp = 1; /* 24 BPP & RGB PFS */ 341 params->ip.pfs = 4; 342 params->ip.npb = 7; 343 params->ip.sat = 2; /* SAT = 32-bit access */ 344 params->ip.ofs0 = 16; /* Red bit offset */ 345 params->ip.ofs1 = 8; /* Green bit offset */ 346 params->ip.ofs2 = 0; /* Blue bit offset */ 347 params->ip.ofs3 = 24; /* Alpha bit offset */ 348 params->ip.wid0 = 7; /* Red bit width - 1 */ 349 params->ip.wid1 = 7; /* Green bit width - 1 */ 350 params->ip.wid2 = 7; /* Blue bit width - 1 */ 351 break; 352 case IPU_PIX_FMT_BGRA32: 353 case IPU_PIX_FMT_BGR32: 354 case IPU_PIX_FMT_ABGR32: 355 params->ip.bpp = 0; 356 params->ip.pfs = 4; 357 params->ip.npb = 7; 358 params->ip.sat = 2; /* SAT = 32-bit access */ 359 params->ip.ofs0 = 8; /* Red bit offset */ 360 params->ip.ofs1 = 16; /* Green bit offset */ 361 params->ip.ofs2 = 24; /* Blue bit offset */ 362 params->ip.ofs3 = 0; /* Alpha bit offset */ 363 params->ip.wid0 = 7; /* Red bit width - 1 */ 364 params->ip.wid1 = 7; /* Green bit width - 1 */ 365 params->ip.wid2 = 7; /* Blue bit width - 1 */ 366 params->ip.wid3 = 7; /* Alpha bit width - 1 */ 367 break; 368 case IPU_PIX_FMT_RGBA32: 369 case IPU_PIX_FMT_RGB32: 370 params->ip.bpp = 0; 371 params->ip.pfs = 4; 372 params->ip.npb = 7; 373 params->ip.sat = 2; /* SAT = 32-bit access */ 374 params->ip.ofs0 = 24; /* Red bit offset */ 375 params->ip.ofs1 = 16; /* Green bit offset */ 376 params->ip.ofs2 = 8; /* Blue bit offset */ 377 params->ip.ofs3 = 0; /* Alpha bit offset */ 378 params->ip.wid0 = 
7; /* Red bit width - 1 */ 379 params->ip.wid1 = 7; /* Green bit width - 1 */ 380 params->ip.wid2 = 7; /* Blue bit width - 1 */ 381 params->ip.wid3 = 7; /* Alpha bit width - 1 */ 382 break; 383 case IPU_PIX_FMT_UYVY: 384 params->ip.bpp = 2; 385 params->ip.pfs = 6; 386 params->ip.npb = 7; 387 params->ip.sat = 2; /* SAT = 32-bit access */ 388 break; 389 case IPU_PIX_FMT_YUV420P2: 390 case IPU_PIX_FMT_YUV420P: 391 params->ip.bpp = 3; 392 params->ip.pfs = 3; 393 params->ip.npb = 7; 394 params->ip.sat = 2; /* SAT = 32-bit access */ 395 u_offset = stride * height; 396 v_offset = u_offset + u_offset / 4; 397 ipu_ch_param_set_plane_offset(params, u_offset, v_offset); 398 break; 399 case IPU_PIX_FMT_YVU422P: 400 params->ip.bpp = 3; 401 params->ip.pfs = 2; 402 params->ip.npb = 7; 403 params->ip.sat = 2; /* SAT = 32-bit access */ 404 v_offset = stride * height; 405 u_offset = v_offset + v_offset / 2; 406 ipu_ch_param_set_plane_offset(params, u_offset, v_offset); 407 break; 408 case IPU_PIX_FMT_YUV422P: 409 params->ip.bpp = 3; 410 params->ip.pfs = 2; 411 params->ip.npb = 7; 412 params->ip.sat = 2; /* SAT = 32-bit access */ 413 u_offset = stride * height; 414 v_offset = u_offset + u_offset / 2; 415 ipu_ch_param_set_plane_offset(params, u_offset, v_offset); 416 break; 417 default: 418 dev_err(ipu_data.dev, 419 "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt); 420 break; 421 } 422 423 params->pp.nsb = 1; 424} 425 426static void ipu_ch_param_set_buffer(union chan_param_mem *params, 427 dma_addr_t buf0, dma_addr_t buf1) 428{ 429 params->pp.eba0 = buf0; 430 params->pp.eba1 = buf1; 431} 432 433static void ipu_ch_param_set_rotation(union chan_param_mem *params, 434 enum ipu_rotate_mode rotate) 435{ 436 params->pp.bam = rotate; 437} 438 439static void ipu_write_param_mem(uint32_t addr, uint32_t *data, 440 uint32_t num_words) 441{ 442 for (; num_words > 0; num_words--) { 443 dev_dbg(ipu_data.dev, 444 "write param mem - addr = 0x%08X, data = 0x%08X\n", 445 addr, *data); 446 
idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR); 447 idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA); 448 addr++; 449 if ((addr & 0x7) == 5) { 450 addr &= ~0x7; /* set to word 0 */ 451 addr += 8; /* increment to next row */ 452 } 453 } 454} 455 456static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size, 457 uint32_t *resize_coeff, 458 uint32_t *downsize_coeff) 459{ 460 uint32_t temp_size; 461 uint32_t temp_downsize; 462 463 *resize_coeff = 1 << 13; 464 *downsize_coeff = 1 << 13; 465 466 /* Cannot downsize more than 8:1 */ 467 if (out_size << 3 < in_size) 468 return -EINVAL; 469 470 /* compute downsizing coefficient */ 471 temp_downsize = 0; 472 temp_size = in_size; 473 while (temp_size >= out_size * 2 && temp_downsize < 2) { 474 temp_size >>= 1; 475 temp_downsize++; 476 } 477 *downsize_coeff = temp_downsize; 478 479 /* 480 * compute resizing coefficient using the following formula: 481 * resize_coeff = M*(SI -1)/(SO - 1) 482 * where M = 2^13, SI - input size, SO - output size 483 */ 484 *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1); 485 if (*resize_coeff >= 16384L) { 486 dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n"); 487 *resize_coeff = 0x3FFF; 488 } 489 490 dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, " 491 "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size, 492 *downsize_coeff, *resize_coeff >= 8192L ? 
1 : 0, 493 ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff); 494 495 return 0; 496} 497 498static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt) 499{ 500 switch (fmt) { 501 case IPU_PIX_FMT_RGB565: 502 case IPU_PIX_FMT_BGR24: 503 case IPU_PIX_FMT_RGB24: 504 case IPU_PIX_FMT_BGR32: 505 case IPU_PIX_FMT_RGB32: 506 return IPU_COLORSPACE_RGB; 507 default: 508 return IPU_COLORSPACE_YCBCR; 509 } 510} 511 512static int ipu_ic_init_prpenc(struct ipu *ipu, 513 union ipu_channel_param *params, bool src_is_csi) 514{ 515 uint32_t reg, ic_conf; 516 uint32_t downsize_coeff, resize_coeff; 517 enum ipu_color_space in_fmt, out_fmt; 518 519 /* Setup vertical resizing */ 520 calc_resize_coeffs(params->video.in_height, 521 params->video.out_height, 522 &resize_coeff, &downsize_coeff); 523 reg = (downsize_coeff << 30) | (resize_coeff << 16); 524 525 /* Setup horizontal resizing */ 526 calc_resize_coeffs(params->video.in_width, 527 params->video.out_width, 528 &resize_coeff, &downsize_coeff); 529 reg |= (downsize_coeff << 14) | resize_coeff; 530 531 /* Setup color space conversion */ 532 in_fmt = format_to_colorspace(params->video.in_pixel_fmt); 533 out_fmt = format_to_colorspace(params->video.out_pixel_fmt); 534 535 /* 536 * Colourspace conversion unsupported yet - see _init_csc() in 537 * Freescale sources 538 */ 539 if (in_fmt != out_fmt) { 540 dev_err(ipu->dev, "Colourspace conversion unsupported!\n"); 541 return -EOPNOTSUPP; 542 } 543 544 idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC); 545 546 ic_conf = idmac_read_icreg(ipu, IC_CONF); 547 548 if (src_is_csi) 549 ic_conf &= ~IC_CONF_RWS_EN; 550 else 551 ic_conf |= IC_CONF_RWS_EN; 552 553 idmac_write_icreg(ipu, ic_conf, IC_CONF); 554 555 return 0; 556} 557 558static uint32_t dma_param_addr(uint32_t dma_ch) 559{ 560 /* Channel Parameter Memory */ 561 return 0x10000 | (dma_ch << 4); 562} 563 564static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, 565 bool prio) 566{ 567 u32 reg = 
		idmac_read_icreg(ipu, IDMAC_CHA_PRI);

	if (prio)
		reg |= 1UL << channel;
	else
		reg &= ~(1UL << channel);

	idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);

	/* Leave a register trace at debug level */
	dump_idmac_reg(ipu);
}

/*
 * ipu_channel_conf_mask() - IPU_CONF sub-module enable bits required by
 * @channel; 0 for channels that need no additional sub-modules.
 */
static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
{
	uint32_t mask;

	switch (channel) {
	case IDMAC_IC_0:
	case IDMAC_IC_7:
		mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
		break;
	case IDMAC_SDC_0:
	case IDMAC_SDC_1:
		mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
		break;
	default:
		mask = 0;
		break;
	}

	return mask;
}

/**
 * ipu_enable_channel() - enable an IPU channel.
 * @idmac:	IPU DMAC context.
 * @ichan:	IDMAC channel.
 * @return:	0 on success or negative error code on failure.
 */
static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
{
	struct ipu *ipu = to_ipu(idmac);
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	uint32_t reg;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Reset to buffer 0 */
	idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
	ichan->active_buffer = 0;
	ichan->status = IPU_CHANNEL_ENABLED;

	/* Display and IC_7 channels run at raised IDMAC priority */
	switch (channel) {
	case IDMAC_SDC_0:
	case IDMAC_SDC_1:
	case IDMAC_IC_7:
		ipu_channel_set_priority(ipu, channel, true);
		/* fall through */
	default:
		break;
	}

	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);

	idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);

	ipu_ic_enable_task(ipu, channel);

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}

/**
 * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
 * @ichan:	IDMAC channel.
 * @pixel_fmt:	pixel format of buffer. Pixel format is a FOURCC ASCII code.
 * @width:	width of buffer in pixels.
 * @height:	height of buffer in pixels.
 * @stride:	stride length of buffer in pixels.
646 * @rot_mode: rotation mode of buffer. A rotation setting other than 647 * IPU_ROTATE_VERT_FLIP should only be used for input buffers of 648 * rotation channels. 649 * @phyaddr_0: buffer 0 physical address. 650 * @phyaddr_1: buffer 1 physical address. Setting this to a value other than 651 * NULL enables double buffering mode. 652 * @return: 0 on success or negative error code on failure. 653 */ 654static int ipu_init_channel_buffer(struct idmac_channel *ichan, 655 enum pixel_fmt pixel_fmt, 656 uint16_t width, uint16_t height, 657 uint32_t stride, 658 enum ipu_rotate_mode rot_mode, 659 dma_addr_t phyaddr_0, dma_addr_t phyaddr_1) 660{ 661 enum ipu_channel channel = ichan->dma_chan.chan_id; 662 struct idmac *idmac = to_idmac(ichan->dma_chan.device); 663 struct ipu *ipu = to_ipu(idmac); 664 union chan_param_mem params = {}; 665 unsigned long flags; 666 uint32_t reg; 667 uint32_t stride_bytes; 668 669 stride_bytes = stride * bytes_per_pixel(pixel_fmt); 670 671 if (stride_bytes % 4) { 672 dev_err(ipu->dev, 673 "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n", 674 stride, stride_bytes); 675 return -EINVAL; 676 } 677 678 /* IC channel's stride must be a multiple of 8 pixels */ 679 if ((channel <= IDMAC_IC_13) && (stride % 8)) { 680 dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); 681 return -EINVAL; 682 } 683 684 /* Build parameter memory data for DMA channel */ 685 ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes); 686 ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1); 687 ipu_ch_param_set_rotation(&params, rot_mode); 688 689 spin_lock_irqsave(&ipu->lock, flags); 690 691 ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10); 692 693 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); 694 695 if (phyaddr_1) 696 reg |= 1UL << channel; 697 else 698 reg &= ~(1UL << channel); 699 700 idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL); 701 702 ichan->status = IPU_CHANNEL_READY; 703 704 
spin_unlock_irqrestore(&ipu->lock, flags); 705 706 return 0; 707} 708 709/** 710 * ipu_select_buffer() - mark a channel's buffer as ready. 711 * @channel: channel ID. 712 * @buffer_n: buffer number to mark ready. 713 */ 714static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) 715{ 716 /* No locking - this is a write-one-to-set register, cleared by IPU */ 717 if (buffer_n == 0) 718 /* Mark buffer 0 as ready. */ 719 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY); 720 else 721 /* Mark buffer 1 as ready. */ 722 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY); 723} 724 725/** 726 * ipu_update_channel_buffer() - update physical address of a channel buffer. 727 * @ichan: IDMAC channel. 728 * @buffer_n: buffer number to update. 729 * 0 or 1 are the only valid values. 730 * @phyaddr: buffer physical address. 731 */ 732/* Called under spin_lock(_irqsave)(&ichan->lock) */ 733static void ipu_update_channel_buffer(struct idmac_channel *ichan, 734 int buffer_n, dma_addr_t phyaddr) 735{ 736 enum ipu_channel channel = ichan->dma_chan.chan_id; 737 uint32_t reg; 738 unsigned long flags; 739 740 spin_lock_irqsave(&ipu_data.lock, flags); 741 742 if (buffer_n == 0) { 743 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); 744 if (reg & (1UL << channel)) { 745 ipu_ic_disable_task(&ipu_data, channel); 746 ichan->status = IPU_CHANNEL_READY; 747 } 748 749 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ 750 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + 751 0x0008UL, IPU_IMA_ADDR); 752 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); 753 } else { 754 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); 755 if (reg & (1UL << channel)) { 756 ipu_ic_disable_task(&ipu_data, channel); 757 ichan->status = IPU_CHANNEL_READY; 758 } 759 760 /* Check if double-buffering is already enabled */ 761 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL); 762 763 if (!(reg & (1UL << channel))) 764 idmac_write_ipureg(&ipu_data, reg | (1UL 
<< channel), 765 IPU_CHA_DB_MODE_SEL); 766 767 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */ 768 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + 769 0x0009UL, IPU_IMA_ADDR); 770 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); 771 } 772 773 spin_unlock_irqrestore(&ipu_data.lock, flags); 774} 775 776/* Called under spin_lock_irqsave(&ichan->lock) */ 777static int ipu_submit_buffer(struct idmac_channel *ichan, 778 struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) 779{ 780 unsigned int chan_id = ichan->dma_chan.chan_id; 781 struct device *dev = &ichan->dma_chan.dev->device; 782 783 if (async_tx_test_ack(&desc->txd)) 784 return -EINTR; 785 786 /* 787 * On first invocation this shouldn't be necessary, the call to 788 * ipu_init_channel_buffer() above will set addresses for us, so we 789 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but 790 * doing it again shouldn't hurt either. 791 */ 792 ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); 793 794 ipu_select_buffer(chan_id, buf_idx); 795 dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", 796 sg, chan_id, buf_idx); 797 798 return 0; 799} 800 801/* Called under spin_lock_irqsave(&ichan->lock) */ 802static int ipu_submit_channel_buffers(struct idmac_channel *ichan, 803 struct idmac_tx_desc *desc) 804{ 805 struct scatterlist *sg; 806 int i, ret = 0; 807 808 for (i = 0, sg = desc->sg; i < 2 && sg; i++) { 809 if (!ichan->sg[i]) { 810 ichan->sg[i] = sg; 811 812 ret = ipu_submit_buffer(ichan, desc, sg, i); 813 if (ret < 0) 814 return ret; 815 816 sg = sg_next(sg); 817 } 818 } 819 820 return ret; 821} 822 823static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) 824{ 825 struct idmac_tx_desc *desc = to_tx_desc(tx); 826 struct idmac_channel *ichan = to_idmac_chan(tx->chan); 827 struct idmac *idmac = to_idmac(tx->chan->device); 828 struct ipu *ipu = to_ipu(idmac); 829 struct device *dev = &ichan->dma_chan.dev->device; 830 dma_cookie_t cookie; 831 
unsigned long flags; 832 int ret; 833 834 /* Sanity check */ 835 if (!list_empty(&desc->list)) { 836 /* The descriptor doesn't belong to client */ 837 dev_err(dev, "Descriptor %p not prepared!\n", tx); 838 return -EBUSY; 839 } 840 841 mutex_lock(&ichan->chan_mutex); 842 843 async_tx_clear_ack(tx); 844 845 if (ichan->status < IPU_CHANNEL_READY) { 846 struct idmac_video_param *video = &ichan->params.video; 847 /* 848 * Initial buffer assignment - the first two sg-entries from 849 * the descriptor will end up in the IDMAC buffers 850 */ 851 dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 : 852 sg_dma_address(&desc->sg[1]); 853 854 WARN_ON(ichan->sg[0] || ichan->sg[1]); 855 856 cookie = ipu_init_channel_buffer(ichan, 857 video->out_pixel_fmt, 858 video->out_width, 859 video->out_height, 860 video->out_stride, 861 IPU_ROTATE_NONE, 862 sg_dma_address(&desc->sg[0]), 863 dma_1); 864 if (cookie < 0) 865 goto out; 866 } 867 868 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); 869 870 cookie = dma_cookie_assign(tx); 871 872 /* ipu->lock can be taken under ichan->lock, but not v.v. 
*/ 873 spin_lock_irqsave(&ichan->lock, flags); 874 875 list_add_tail(&desc->list, &ichan->queue); 876 /* submit_buffers() atomically verifies and fills empty sg slots */ 877 ret = ipu_submit_channel_buffers(ichan, desc); 878 879 spin_unlock_irqrestore(&ichan->lock, flags); 880 881 if (ret < 0) { 882 cookie = ret; 883 goto dequeue; 884 } 885 886 if (ichan->status < IPU_CHANNEL_ENABLED) { 887 ret = ipu_enable_channel(idmac, ichan); 888 if (ret < 0) { 889 cookie = ret; 890 goto dequeue; 891 } 892 } 893 894 dump_idmac_reg(ipu); 895 896dequeue: 897 if (cookie < 0) { 898 spin_lock_irqsave(&ichan->lock, flags); 899 list_del_init(&desc->list); 900 spin_unlock_irqrestore(&ichan->lock, flags); 901 tx->cookie = cookie; 902 ichan->dma_chan.cookie = cookie; 903 } 904 905out: 906 mutex_unlock(&ichan->chan_mutex); 907 908 return cookie; 909} 910 911/* Called with ichan->chan_mutex held */ 912static int idmac_desc_alloc(struct idmac_channel *ichan, int n) 913{ 914 struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc)); 915 struct idmac *idmac = to_idmac(ichan->dma_chan.device); 916 917 if (!desc) 918 return -ENOMEM; 919 920 /* No interrupts, just disable the tasklet for a moment */ 921 tasklet_disable(&to_ipu(idmac)->tasklet); 922 923 ichan->n_tx_desc = n; 924 ichan->desc = desc; 925 INIT_LIST_HEAD(&ichan->queue); 926 INIT_LIST_HEAD(&ichan->free_list); 927 928 while (n--) { 929 struct dma_async_tx_descriptor *txd = &desc->txd; 930 931 memset(txd, 0, sizeof(*txd)); 932 dma_async_tx_descriptor_init(txd, &ichan->dma_chan); 933 txd->tx_submit = idmac_tx_submit; 934 935 list_add(&desc->list, &ichan->free_list); 936 937 desc++; 938 } 939 940 tasklet_enable(&to_ipu(idmac)->tasklet); 941 942 return 0; 943} 944 945/** 946 * ipu_init_channel() - initialize an IPU channel. 947 * @idmac: IPU DMAC context. 948 * @ichan: pointer to the channel object. 949 * @return 0 on success or negative error code on failure. 
950 */ 951static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan) 952{ 953 union ipu_channel_param *params = &ichan->params; 954 uint32_t ipu_conf; 955 enum ipu_channel channel = ichan->dma_chan.chan_id; 956 unsigned long flags; 957 uint32_t reg; 958 struct ipu *ipu = to_ipu(idmac); 959 int ret = 0, n_desc = 0; 960 961 dev_dbg(ipu->dev, "init channel = %d\n", channel); 962 963 if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 && 964 channel != IDMAC_IC_7) 965 return -EINVAL; 966 967 spin_lock_irqsave(&ipu->lock, flags); 968 969 switch (channel) { 970 case IDMAC_IC_7: 971 n_desc = 16; 972 reg = idmac_read_icreg(ipu, IC_CONF); 973 idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF); 974 break; 975 case IDMAC_IC_0: 976 n_desc = 16; 977 reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW); 978 idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW); 979 ret = ipu_ic_init_prpenc(ipu, params, true); 980 break; 981 case IDMAC_SDC_0: 982 case IDMAC_SDC_1: 983 n_desc = 4; 984 default: 985 break; 986 } 987 988 ipu->channel_init_mask |= 1L << channel; 989 990 /* Enable IPU sub module */ 991 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) | 992 ipu_channel_conf_mask(channel); 993 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); 994 995 spin_unlock_irqrestore(&ipu->lock, flags); 996 997 if (n_desc && !ichan->desc) 998 ret = idmac_desc_alloc(ichan, n_desc); 999 1000 dump_idmac_reg(ipu); 1001 1002 return ret; 1003} 1004 1005/** 1006 * ipu_uninit_channel() - uninitialize an IPU channel. 1007 * @idmac: IPU DMAC context. 1008 * @ichan: pointer to the channel object. 
1009 */ 1010static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan) 1011{ 1012 enum ipu_channel channel = ichan->dma_chan.chan_id; 1013 unsigned long flags; 1014 uint32_t reg; 1015 unsigned long chan_mask = 1UL << channel; 1016 uint32_t ipu_conf; 1017 struct ipu *ipu = to_ipu(idmac); 1018 1019 spin_lock_irqsave(&ipu->lock, flags); 1020 1021 if (!(ipu->channel_init_mask & chan_mask)) { 1022 dev_err(ipu->dev, "Channel already uninitialized %d\n", 1023 channel); 1024 spin_unlock_irqrestore(&ipu->lock, flags); 1025 return; 1026 } 1027 1028 /* Reset the double buffer */ 1029 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); 1030 idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL); 1031 1032 ichan->sec_chan_en = false; 1033 1034 switch (channel) { 1035 case IDMAC_IC_7: 1036 reg = idmac_read_icreg(ipu, IC_CONF); 1037 idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN), 1038 IC_CONF); 1039 break; 1040 case IDMAC_IC_0: 1041 reg = idmac_read_icreg(ipu, IC_CONF); 1042 idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1), 1043 IC_CONF); 1044 break; 1045 case IDMAC_SDC_0: 1046 case IDMAC_SDC_1: 1047 default: 1048 break; 1049 } 1050 1051 ipu->channel_init_mask &= ~(1L << channel); 1052 1053 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) & 1054 ~ipu_channel_conf_mask(channel); 1055 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); 1056 1057 spin_unlock_irqrestore(&ipu->lock, flags); 1058 1059 ichan->n_tx_desc = 0; 1060 vfree(ichan->desc); 1061 ichan->desc = NULL; 1062} 1063 1064/** 1065 * ipu_disable_channel() - disable an IPU channel. 1066 * @idmac: IPU DMAC context. 1067 * @ichan: channel object pointer. 1068 * @wait_for_stop: flag to set whether to wait for channel end of frame or 1069 * return immediately. 1070 * @return: 0 on success or negative error code on failure. 
 */
static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
			       bool wait_for_stop)
{
	enum ipu_channel channel = ichan->dma_chan.chan_id;
	struct ipu *ipu = to_ipu(idmac);
	uint32_t reg;
	unsigned long flags;
	unsigned long chan_mask = 1UL << channel;
	unsigned int timeout;

	if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
		/* Poll up to 40 * 10ms for the DMA channel to go idle */
		timeout = 40;
		/* This waiting always fails. Related to spurious irq problem */
		while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
		       (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
			timeout--;
			msleep(10);

			if (!timeout) {
				dev_dbg(ipu->dev,
					"Warning: timeout waiting for channel %u to "
					"stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
					"busy = 0x%08X, tstat = 0x%08X\n", channel,
					idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
					idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
					idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
					idmac_read_ipureg(ipu, IPU_TASKS_STAT));
				break;
			}
		}
		dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
	}
	/* SDC BG and FG must be disabled before DMA is disabled */
	if (wait_for_stop && (channel == IDMAC_SDC_0 ||
			      channel == IDMAC_SDC_1)) {
		/* Wait up to 5 * 5ms for an end-of-frame interrupt */
		for (timeout = 5;
		     timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
			msleep(5);
	}

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable IC task */
	ipu_ic_disable_task(ipu, channel);

	/* Disable DMA channel(s) */
	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
	idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}

/*
 * Advance to the next scatterlist element, crossing into the next queued
 * descriptor when the current one is exhausted; NULL when nothing is left.
 */
static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
	struct idmac_tx_desc **desc, struct scatterlist *sg)
{
	struct scatterlist *sgnew = sg ?
sg_next(sg) : NULL; 1130 1131 if (sgnew) 1132 /* next sg-element in this list */ 1133 return sgnew; 1134 1135 if ((*desc)->list.next == &ichan->queue) 1136 /* No more descriptors on the queue */ 1137 return NULL; 1138 1139 /* Fetch next descriptor */ 1140 *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list); 1141 return (*desc)->sg; 1142} 1143 1144/* 1145 * We have several possibilities here: 1146 * current BUF next BUF 1147 * 1148 * not last sg next not last sg 1149 * not last sg next last sg 1150 * last sg first sg from next descriptor 1151 * last sg NULL 1152 * 1153 * Besides, the descriptor queue might be empty or not. We process all these 1154 * cases carefully. 1155 */ 1156static irqreturn_t idmac_interrupt(int irq, void *dev_id) 1157{ 1158 struct idmac_channel *ichan = dev_id; 1159 struct device *dev = &ichan->dma_chan.dev->device; 1160 unsigned int chan_id = ichan->dma_chan.chan_id; 1161 struct scatterlist **sg, *sgnext, *sgnew = NULL; 1162 /* Next transfer descriptor */ 1163 struct idmac_tx_desc *desc, *descnew; 1164 dma_async_tx_callback callback; 1165 void *callback_param; 1166 bool done = false; 1167 u32 ready0, ready1, curbuf, err; 1168 unsigned long flags; 1169 1170 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ 1171 1172 dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer); 1173 1174 spin_lock_irqsave(&ipu_data.lock, flags); 1175 1176 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); 1177 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); 1178 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); 1179 err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4); 1180 1181 if (err & (1 << chan_id)) { 1182 idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4); 1183 spin_unlock_irqrestore(&ipu_data.lock, flags); 1184 /* 1185 * Doing this 1186 * ichan->sg[0] = ichan->sg[1] = NULL; 1187 * you can force channel re-enable on the next tx_submit(), but 1188 * this is dirty - think about 
descriptors with multiple 1189 * sg elements. 1190 */ 1191 dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n", 1192 chan_id, ready0, ready1, curbuf); 1193 return IRQ_HANDLED; 1194 } 1195 spin_unlock_irqrestore(&ipu_data.lock, flags); 1196 1197 /* Other interrupts do not interfere with this channel */ 1198 spin_lock(&ichan->lock); 1199 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || 1200 (!ichan->active_buffer && (ready0 >> chan_id) & 1) 1201 )) { 1202 spin_unlock(&ichan->lock); 1203 dev_dbg(dev, 1204 "IRQ with active buffer still ready on channel %x, " 1205 "active %d, ready %x, %x!\n", chan_id, 1206 ichan->active_buffer, ready0, ready1); 1207 return IRQ_NONE; 1208 } 1209 1210 if (unlikely(list_empty(&ichan->queue))) { 1211 ichan->sg[ichan->active_buffer] = NULL; 1212 spin_unlock(&ichan->lock); 1213 dev_err(dev, 1214 "IRQ without queued buffers on channel %x, active %d, " 1215 "ready %x, %x!\n", chan_id, 1216 ichan->active_buffer, ready0, ready1); 1217 return IRQ_NONE; 1218 } 1219 1220 /* 1221 * active_buffer is a software flag, it shows which buffer we are 1222 * currently expecting back from the hardware, IDMAC should be 1223 * processing the other buffer already 1224 */ 1225 sg = &ichan->sg[ichan->active_buffer]; 1226 sgnext = ichan->sg[!ichan->active_buffer]; 1227 1228 if (!*sg) { 1229 spin_unlock(&ichan->lock); 1230 return IRQ_HANDLED; 1231 } 1232 1233 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); 1234 descnew = desc; 1235 1236 dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", 1237 irq, sg_dma_address(*sg), sgnext ? 
sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); 1238 1239 /* Find the descriptor of sgnext */ 1240 sgnew = idmac_sg_next(ichan, &descnew, *sg); 1241 if (sgnext != sgnew) 1242 dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew); 1243 1244 /* 1245 * if sgnext == NULL sg must be the last element in a scatterlist and 1246 * queue must be empty 1247 */ 1248 if (unlikely(!sgnext)) { 1249 if (!WARN_ON(sg_next(*sg))) 1250 dev_dbg(dev, "Underrun on channel %x\n", chan_id); 1251 ichan->sg[!ichan->active_buffer] = sgnew; 1252 1253 if (unlikely(sgnew)) { 1254 ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer); 1255 } else { 1256 spin_lock_irqsave(&ipu_data.lock, flags); 1257 ipu_ic_disable_task(&ipu_data, chan_id); 1258 spin_unlock_irqrestore(&ipu_data.lock, flags); 1259 ichan->status = IPU_CHANNEL_READY; 1260 /* Continue to check for complete descriptor */ 1261 } 1262 } 1263 1264 /* Calculate and submit the next sg element */ 1265 sgnew = idmac_sg_next(ichan, &descnew, sgnew); 1266 1267 if (unlikely(!sg_next(*sg)) || !sgnext) { 1268 /* 1269 * Last element in scatterlist done, remove from the queue, 1270 * _init for debugging 1271 */ 1272 list_del_init(&desc->list); 1273 done = true; 1274 } 1275 1276 *sg = sgnew; 1277 1278 if (likely(sgnew) && 1279 ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { 1280 callback = descnew->txd.callback; 1281 callback_param = descnew->txd.callback_param; 1282 list_del_init(&descnew->list); 1283 spin_unlock(&ichan->lock); 1284 if (callback) 1285 callback(callback_param); 1286 spin_lock(&ichan->lock); 1287 } 1288 1289 /* Flip the active buffer - even if update above failed */ 1290 ichan->active_buffer = !ichan->active_buffer; 1291 if (done) 1292 dma_cookie_complete(&desc->txd); 1293 1294 callback = desc->txd.callback; 1295 callback_param = desc->txd.callback_param; 1296 1297 spin_unlock(&ichan->lock); 1298 1299 if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback) 1300 
callback(callback_param); 1301 1302 return IRQ_HANDLED; 1303} 1304 1305static void ipu_gc_tasklet(unsigned long arg) 1306{ 1307 struct ipu *ipu = (struct ipu *)arg; 1308 int i; 1309 1310 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1311 struct idmac_channel *ichan = ipu->channel + i; 1312 struct idmac_tx_desc *desc; 1313 unsigned long flags; 1314 struct scatterlist *sg; 1315 int j, k; 1316 1317 for (j = 0; j < ichan->n_tx_desc; j++) { 1318 desc = ichan->desc + j; 1319 spin_lock_irqsave(&ichan->lock, flags); 1320 if (async_tx_test_ack(&desc->txd)) { 1321 list_move(&desc->list, &ichan->free_list); 1322 for_each_sg(desc->sg, sg, desc->sg_len, k) { 1323 if (ichan->sg[0] == sg) 1324 ichan->sg[0] = NULL; 1325 else if (ichan->sg[1] == sg) 1326 ichan->sg[1] = NULL; 1327 } 1328 async_tx_clear_ack(&desc->txd); 1329 } 1330 spin_unlock_irqrestore(&ichan->lock, flags); 1331 } 1332 } 1333} 1334 1335/* Allocate and initialise a transfer descriptor. */ 1336static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, 1337 struct scatterlist *sgl, unsigned int sg_len, 1338 enum dma_transfer_direction direction, unsigned long tx_flags, 1339 void *context) 1340{ 1341 struct idmac_channel *ichan = to_idmac_chan(chan); 1342 struct idmac_tx_desc *desc = NULL; 1343 struct dma_async_tx_descriptor *txd = NULL; 1344 unsigned long flags; 1345 1346 /* We only can handle these three channels so far */ 1347 if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 && 1348 chan->chan_id != IDMAC_IC_7) 1349 return NULL; 1350 1351 if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { 1352 dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); 1353 return NULL; 1354 } 1355 1356 mutex_lock(&ichan->chan_mutex); 1357 1358 spin_lock_irqsave(&ichan->lock, flags); 1359 if (!list_empty(&ichan->free_list)) { 1360 desc = list_entry(ichan->free_list.next, 1361 struct idmac_tx_desc, list); 1362 1363 list_del_init(&desc->list); 1364 1365 desc->sg_len = sg_len; 
1366 desc->sg = sgl; 1367 txd = &desc->txd; 1368 txd->flags = tx_flags; 1369 } 1370 spin_unlock_irqrestore(&ichan->lock, flags); 1371 1372 mutex_unlock(&ichan->chan_mutex); 1373 1374 tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet); 1375 1376 return txd; 1377} 1378 1379/* Re-select the current buffer and re-activate the channel */ 1380static void idmac_issue_pending(struct dma_chan *chan) 1381{ 1382 struct idmac_channel *ichan = to_idmac_chan(chan); 1383 struct idmac *idmac = to_idmac(chan->device); 1384 struct ipu *ipu = to_ipu(idmac); 1385 unsigned long flags; 1386 1387 /* This is not always needed, but doesn't hurt either */ 1388 spin_lock_irqsave(&ipu->lock, flags); 1389 ipu_select_buffer(chan->chan_id, ichan->active_buffer); 1390 spin_unlock_irqrestore(&ipu->lock, flags); 1391 1392 /* 1393 * Might need to perform some parts of initialisation from 1394 * ipu_enable_channel(), but not all, we do not want to reset to buffer 1395 * 0, don't need to set priority again either, but re-enabling the task 1396 * and the channel might be a good idea. 
1397 */ 1398} 1399 1400static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1401 unsigned long arg) 1402{ 1403 struct idmac_channel *ichan = to_idmac_chan(chan); 1404 struct idmac *idmac = to_idmac(chan->device); 1405 struct ipu *ipu = to_ipu(idmac); 1406 struct list_head *list, *tmp; 1407 unsigned long flags; 1408 int i; 1409 1410 switch (cmd) { 1411 case DMA_PAUSE: 1412 spin_lock_irqsave(&ipu->lock, flags); 1413 ipu_ic_disable_task(ipu, chan->chan_id); 1414 1415 /* Return all descriptors into "prepared" state */ 1416 list_for_each_safe(list, tmp, &ichan->queue) 1417 list_del_init(list); 1418 1419 ichan->sg[0] = NULL; 1420 ichan->sg[1] = NULL; 1421 1422 spin_unlock_irqrestore(&ipu->lock, flags); 1423 1424 ichan->status = IPU_CHANNEL_INITIALIZED; 1425 break; 1426 case DMA_TERMINATE_ALL: 1427 ipu_disable_channel(idmac, ichan, 1428 ichan->status >= IPU_CHANNEL_ENABLED); 1429 1430 tasklet_disable(&ipu->tasklet); 1431 1432 /* ichan->queue is modified in ISR, have to spinlock */ 1433 spin_lock_irqsave(&ichan->lock, flags); 1434 list_splice_init(&ichan->queue, &ichan->free_list); 1435 1436 if (ichan->desc) 1437 for (i = 0; i < ichan->n_tx_desc; i++) { 1438 struct idmac_tx_desc *desc = ichan->desc + i; 1439 if (list_empty(&desc->list)) 1440 /* Descriptor was prepared, but not submitted */ 1441 list_add(&desc->list, &ichan->free_list); 1442 1443 async_tx_clear_ack(&desc->txd); 1444 } 1445 1446 ichan->sg[0] = NULL; 1447 ichan->sg[1] = NULL; 1448 spin_unlock_irqrestore(&ichan->lock, flags); 1449 1450 tasklet_enable(&ipu->tasklet); 1451 1452 ichan->status = IPU_CHANNEL_INITIALIZED; 1453 break; 1454 default: 1455 return -ENOSYS; 1456 } 1457 1458 return 0; 1459} 1460 1461static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1462 unsigned long arg) 1463{ 1464 struct idmac_channel *ichan = to_idmac_chan(chan); 1465 int ret; 1466 1467 mutex_lock(&ichan->chan_mutex); 1468 1469 ret = __idmac_control(chan, cmd, arg); 1470 1471 
mutex_unlock(&ichan->chan_mutex); 1472 1473 return ret; 1474} 1475 1476#ifdef DEBUG 1477static irqreturn_t ic_sof_irq(int irq, void *dev_id) 1478{ 1479 struct idmac_channel *ichan = dev_id; 1480 printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n", 1481 irq, ichan->dma_chan.chan_id); 1482 disable_irq_nosync(irq); 1483 return IRQ_HANDLED; 1484} 1485 1486static irqreturn_t ic_eof_irq(int irq, void *dev_id) 1487{ 1488 struct idmac_channel *ichan = dev_id; 1489 printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n", 1490 irq, ichan->dma_chan.chan_id); 1491 disable_irq_nosync(irq); 1492 return IRQ_HANDLED; 1493} 1494 1495static int ic_sof = -EINVAL, ic_eof = -EINVAL; 1496#endif 1497 1498static int idmac_alloc_chan_resources(struct dma_chan *chan) 1499{ 1500 struct idmac_channel *ichan = to_idmac_chan(chan); 1501 struct idmac *idmac = to_idmac(chan->device); 1502 int ret; 1503 1504 /* dmaengine.c now guarantees to only offer free channels */ 1505 BUG_ON(chan->client_count > 1); 1506 WARN_ON(ichan->status != IPU_CHANNEL_FREE); 1507 1508 dma_cookie_init(chan); 1509 1510 ret = ipu_irq_map(chan->chan_id); 1511 if (ret < 0) 1512 goto eimap; 1513 1514 ichan->eof_irq = ret; 1515 1516 /* 1517 * Important to first disable the channel, because maybe someone 1518 * used it before us, e.g., the bootloader 1519 */ 1520 ipu_disable_channel(idmac, ichan, true); 1521 1522 ret = ipu_init_channel(idmac, ichan); 1523 if (ret < 0) 1524 goto eichan; 1525 1526 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, 1527 ichan->eof_name, ichan); 1528 if (ret < 0) 1529 goto erirq; 1530 1531#ifdef DEBUG 1532 if (chan->chan_id == IDMAC_IC_7) { 1533 ic_sof = ipu_irq_map(69); 1534 if (ic_sof > 0) 1535 request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); 1536 ic_eof = ipu_irq_map(70); 1537 if (ic_eof > 0) 1538 request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); 1539 } 1540#endif 1541 1542 ichan->status = IPU_CHANNEL_INITIALIZED; 1543 1544 dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n", 1545 
chan->chan_id, ichan->eof_irq); 1546 1547 return ret; 1548 1549erirq: 1550 ipu_uninit_channel(idmac, ichan); 1551eichan: 1552 ipu_irq_unmap(chan->chan_id); 1553eimap: 1554 return ret; 1555} 1556 1557static void idmac_free_chan_resources(struct dma_chan *chan) 1558{ 1559 struct idmac_channel *ichan = to_idmac_chan(chan); 1560 struct idmac *idmac = to_idmac(chan->device); 1561 1562 mutex_lock(&ichan->chan_mutex); 1563 1564 __idmac_control(chan, DMA_TERMINATE_ALL, 0); 1565 1566 if (ichan->status > IPU_CHANNEL_FREE) { 1567#ifdef DEBUG 1568 if (chan->chan_id == IDMAC_IC_7) { 1569 if (ic_sof > 0) { 1570 free_irq(ic_sof, ichan); 1571 ipu_irq_unmap(69); 1572 ic_sof = -EINVAL; 1573 } 1574 if (ic_eof > 0) { 1575 free_irq(ic_eof, ichan); 1576 ipu_irq_unmap(70); 1577 ic_eof = -EINVAL; 1578 } 1579 } 1580#endif 1581 free_irq(ichan->eof_irq, ichan); 1582 ipu_irq_unmap(chan->chan_id); 1583 } 1584 1585 ichan->status = IPU_CHANNEL_FREE; 1586 1587 ipu_uninit_channel(idmac, ichan); 1588 1589 mutex_unlock(&ichan->chan_mutex); 1590 1591 tasklet_schedule(&to_ipu(idmac)->tasklet); 1592} 1593 1594static enum dma_status idmac_tx_status(struct dma_chan *chan, 1595 dma_cookie_t cookie, struct dma_tx_state *txstate) 1596{ 1597 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); 1598 if (cookie != chan->cookie) 1599 return DMA_ERROR; 1600 return DMA_SUCCESS; 1601} 1602 1603static int __init ipu_idmac_init(struct ipu *ipu) 1604{ 1605 struct idmac *idmac = &ipu->idmac; 1606 struct dma_device *dma = &idmac->dma; 1607 int i; 1608 1609 dma_cap_set(DMA_SLAVE, dma->cap_mask); 1610 dma_cap_set(DMA_PRIVATE, dma->cap_mask); 1611 1612 /* Compulsory common fields */ 1613 dma->dev = ipu->dev; 1614 dma->device_alloc_chan_resources = idmac_alloc_chan_resources; 1615 dma->device_free_chan_resources = idmac_free_chan_resources; 1616 dma->device_tx_status = idmac_tx_status; 1617 dma->device_issue_pending = idmac_issue_pending; 1618 1619 /* Compulsory for DMA_SLAVE fields */ 1620 
dma->device_prep_slave_sg = idmac_prep_slave_sg; 1621 dma->device_control = idmac_control; 1622 1623 INIT_LIST_HEAD(&dma->channels); 1624 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1625 struct idmac_channel *ichan = ipu->channel + i; 1626 struct dma_chan *dma_chan = &ichan->dma_chan; 1627 1628 spin_lock_init(&ichan->lock); 1629 mutex_init(&ichan->chan_mutex); 1630 1631 ichan->status = IPU_CHANNEL_FREE; 1632 ichan->sec_chan_en = false; 1633 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); 1634 1635 dma_chan->device = &idmac->dma; 1636 dma_cookie_init(dma_chan); 1637 dma_chan->chan_id = i; 1638 list_add_tail(&dma_chan->device_node, &dma->channels); 1639 } 1640 1641 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); 1642 1643 return dma_async_device_register(&idmac->dma); 1644} 1645 1646static void __exit ipu_idmac_exit(struct ipu *ipu) 1647{ 1648 int i; 1649 struct idmac *idmac = &ipu->idmac; 1650 1651 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1652 struct idmac_channel *ichan = ipu->channel + i; 1653 1654 idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); 1655 } 1656 1657 dma_async_device_unregister(&idmac->dma); 1658} 1659 1660/***************************************************************************** 1661 * IPU common probe / remove 1662 */ 1663 1664static int __init ipu_probe(struct platform_device *pdev) 1665{ 1666 struct ipu_platform_data *pdata = pdev->dev.platform_data; 1667 struct resource *mem_ipu, *mem_ic; 1668 int ret; 1669 1670 spin_lock_init(&ipu_data.lock); 1671 1672 mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1673 mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1674 if (!pdata || !mem_ipu || !mem_ic) 1675 return -EINVAL; 1676 1677 ipu_data.dev = &pdev->dev; 1678 1679 platform_set_drvdata(pdev, &ipu_data); 1680 1681 ret = platform_get_irq(pdev, 0); 1682 if (ret < 0) 1683 goto err_noirq; 1684 1685 ipu_data.irq_fn = ret; 1686 ret = platform_get_irq(pdev, 1); 1687 if (ret < 0) 1688 goto err_noirq; 1689 1690 
ipu_data.irq_err = ret; 1691 ipu_data.irq_base = pdata->irq_base; 1692 1693 dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n", 1694 ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); 1695 1696 /* Remap IPU common registers */ 1697 ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu)); 1698 if (!ipu_data.reg_ipu) { 1699 ret = -ENOMEM; 1700 goto err_ioremap_ipu; 1701 } 1702 1703 /* Remap Image Converter and Image DMA Controller registers */ 1704 ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic)); 1705 if (!ipu_data.reg_ic) { 1706 ret = -ENOMEM; 1707 goto err_ioremap_ic; 1708 } 1709 1710 /* Get IPU clock */ 1711 ipu_data.ipu_clk = clk_get(&pdev->dev, NULL); 1712 if (IS_ERR(ipu_data.ipu_clk)) { 1713 ret = PTR_ERR(ipu_data.ipu_clk); 1714 goto err_clk_get; 1715 } 1716 1717 /* Make sure IPU HSP clock is running */ 1718 clk_enable(ipu_data.ipu_clk); 1719 1720 /* Disable all interrupts */ 1721 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); 1722 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2); 1723 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3); 1724 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4); 1725 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5); 1726 1727 dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name, 1728 (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err); 1729 1730 ret = ipu_irq_attach_irq(&ipu_data, pdev); 1731 if (ret < 0) 1732 goto err_attach_irq; 1733 1734 /* Initialize DMA engine */ 1735 ret = ipu_idmac_init(&ipu_data); 1736 if (ret < 0) 1737 goto err_idmac_init; 1738 1739 tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data); 1740 1741 ipu_data.dev = &pdev->dev; 1742 1743 dev_dbg(ipu_data.dev, "IPU initialized\n"); 1744 1745 return 0; 1746 1747err_idmac_init: 1748err_attach_irq: 1749 ipu_irq_detach_irq(&ipu_data, pdev); 1750 clk_disable(ipu_data.ipu_clk); 1751 clk_put(ipu_data.ipu_clk); 1752err_clk_get: 1753 iounmap(ipu_data.reg_ic); 1754err_ioremap_ic: 
1755 iounmap(ipu_data.reg_ipu); 1756err_ioremap_ipu: 1757err_noirq: 1758 dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret); 1759 return ret; 1760} 1761 1762static int __exit ipu_remove(struct platform_device *pdev) 1763{ 1764 struct ipu *ipu = platform_get_drvdata(pdev); 1765 1766 ipu_idmac_exit(ipu); 1767 ipu_irq_detach_irq(ipu, pdev); 1768 clk_disable(ipu->ipu_clk); 1769 clk_put(ipu->ipu_clk); 1770 iounmap(ipu->reg_ic); 1771 iounmap(ipu->reg_ipu); 1772 tasklet_kill(&ipu->tasklet); 1773 platform_set_drvdata(pdev, NULL); 1774 1775 return 0; 1776} 1777 1778/* 1779 * We need two MEM resources - with IPU-common and Image Converter registers, 1780 * including PF_CONF and IDMAC_* registers, and two IRQs - function and error 1781 */ 1782static struct platform_driver ipu_platform_driver = { 1783 .driver = { 1784 .name = "ipu-core", 1785 .owner = THIS_MODULE, 1786 }, 1787 .remove = __exit_p(ipu_remove), 1788}; 1789 1790static int __init ipu_init(void) 1791{ 1792 return platform_driver_probe(&ipu_platform_driver, ipu_probe); 1793} 1794subsys_initcall(ipu_init); 1795 1796MODULE_DESCRIPTION("IPU core driver"); 1797MODULE_LICENSE("GPL v2"); 1798MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>"); 1799MODULE_ALIAS("platform:ipu-core");