Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.2-rc5 1501 lines 41 kB view raw
1// SPDX-License-Identifier: GPL-2.0+ 2/* 3 * R-Car Gen3 Digital Radio Interface (DRIF) driver 4 * 5 * Copyright (C) 2017 Renesas Electronics Corporation 6 * 7 * This program is distributed in the hope that it will be useful, 8 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * GNU General Public License for more details. 11 */ 12 13/* 14 * The R-Car DRIF is a receive only MSIOF like controller with an 15 * external master device driving the SCK. It receives data into a FIFO, 16 * then this driver uses the SYS-DMAC engine to move the data from 17 * the device to memory. 18 * 19 * Each DRIF channel DRIFx (as per datasheet) contains two internal 20 * channels DRIFx0 & DRIFx1 within itself with each having its own resources 21 * like module clk, register set, irq and dma. These internal channels share 22 * common CLK & SYNC from master. The two data pins D0 & D1 shall be 23 * considered to represent the two internal channels. This internal split 24 * is not visible to the master device. 25 * 26 * Depending on the master device, a DRIF channel can use 27 * (1) both internal channels (D0 & D1) to receive data in parallel (or) 28 * (2) one internal channel (D0 or D1) to receive data 29 * 30 * The primary design goal of this controller is to act as a Digital Radio 31 * Interface that receives digital samples from a tuner device. Hence the 32 * driver exposes the device as a V4L2 SDR device. In order to qualify as 33 * a V4L2 SDR device, it should possess a tuner interface as mandated by the 34 * framework. This driver expects a tuner driver (sub-device) to bind 35 * asynchronously with this device and the combined drivers shall expose 36 * a V4L2 compliant SDR device. The DRIF driver is independent of the 37 * tuner vendor. 38 * 39 * The DRIF h/w can support I2S mode and Frame start synchronization pulse mode. 40 * This driver is tested for I2S mode only because of the availability of 41 * suitable master devices. Hence, not all configurable options of DRIF h/w 42 * like lsb/msb first, syncdl, dtdl etc. are exposed via DT and I2S defaults 43 * are used. These can be exposed later if needed after testing. 44 */ 45#include <linux/bitops.h> 46#include <linux/clk.h> 47#include <linux/dma-mapping.h> 48#include <linux/dmaengine.h> 49#include <linux/ioctl.h> 50#include <linux/iopoll.h> 51#include <linux/module.h> 52#include <linux/of_graph.h> 53#include <linux/of_device.h> 54#include <linux/platform_device.h> 55#include <linux/sched.h> 56#include <media/v4l2-async.h> 57#include <media/v4l2-ctrls.h> 58#include <media/v4l2-device.h> 59#include <media/v4l2-event.h> 60#include <media/v4l2-fh.h> 61#include <media/v4l2-ioctl.h> 62#include <media/videobuf2-v4l2.h> 63#include <media/videobuf2-vmalloc.h> 64 65/* DRIF register offsets */ 66#define RCAR_DRIF_SITMDR1 0x00 67#define RCAR_DRIF_SITMDR2 0x04 68#define RCAR_DRIF_SITMDR3 0x08 69#define RCAR_DRIF_SIRMDR1 0x10 70#define RCAR_DRIF_SIRMDR2 0x14 71#define RCAR_DRIF_SIRMDR3 0x18 72#define RCAR_DRIF_SICTR 0x28 73#define RCAR_DRIF_SIFCTR 0x30 74#define RCAR_DRIF_SISTR 0x40 75#define RCAR_DRIF_SIIER 0x44 76#define RCAR_DRIF_SIRFDR 0x60 77 78#define RCAR_DRIF_RFOVF BIT(3) /* Receive FIFO overflow */ 79#define RCAR_DRIF_RFUDF BIT(4) /* Receive FIFO underflow */ 80#define RCAR_DRIF_RFSERR BIT(5) /* Receive frame sync error */ 81#define RCAR_DRIF_REOF BIT(7) /* Frame reception end */ 82#define RCAR_DRIF_RDREQ BIT(12) /* Receive data xfer req */ 83#define RCAR_DRIF_RFFUL BIT(13) /* Receive FIFO full */ 84 85/* SIRMDR1 */ 86#define RCAR_DRIF_SIRMDR1_SYNCMD_FRAME (0 << 28) 87#define RCAR_DRIF_SIRMDR1_SYNCMD_LR (3 << 28) 88 89#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH (0 << 25) 90#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW (1 << 25) 91 92#define RCAR_DRIF_SIRMDR1_MSB_FIRST (0 << 24) 93#define RCAR_DRIF_SIRMDR1_LSB_FIRST (1 << 24) 94 95#define RCAR_DRIF_SIRMDR1_DTDL_0 (0 << 20) 96#define RCAR_DRIF_SIRMDR1_DTDL_1 (1 << 20) 97#define RCAR_DRIF_SIRMDR1_DTDL_2 (2 << 20) 98#define RCAR_DRIF_SIRMDR1_DTDL_0PT5 (5 << 20) 99#define RCAR_DRIF_SIRMDR1_DTDL_1PT5 (6 << 20) 100 101#define RCAR_DRIF_SIRMDR1_SYNCDL_0 (0 << 20) 102#define RCAR_DRIF_SIRMDR1_SYNCDL_1 (1 << 20) 103#define RCAR_DRIF_SIRMDR1_SYNCDL_2 (2 << 20) 104#define RCAR_DRIF_SIRMDR1_SYNCDL_3 (3 << 20) 105#define RCAR_DRIF_SIRMDR1_SYNCDL_0PT5 (5 << 20) 106#define RCAR_DRIF_SIRMDR1_SYNCDL_1PT5 (6 << 20) 107 108#define RCAR_DRIF_MDR_GRPCNT(n) (((n) - 1) << 30) 109#define RCAR_DRIF_MDR_BITLEN(n) (((n) - 1) << 24) 110#define RCAR_DRIF_MDR_WDCNT(n) (((n) - 1) << 16) 111 112/* Hidden Transmit register that controls CLK & SYNC */ 113#define RCAR_DRIF_SITMDR1_PCON BIT(30) 114 115#define RCAR_DRIF_SICTR_RX_RISING_EDGE BIT(26) 116#define RCAR_DRIF_SICTR_RX_EN BIT(8) 117#define RCAR_DRIF_SICTR_RESET BIT(0) 118 119/* Constants */ 120#define RCAR_DRIF_NUM_HWBUFS 32 121#define RCAR_DRIF_MAX_DEVS 4 122#define RCAR_DRIF_DEFAULT_NUM_HWBUFS 16 123#define RCAR_DRIF_DEFAULT_HWBUF_SIZE (4 * PAGE_SIZE) 124#define RCAR_DRIF_MAX_CHANNEL 2 125#define RCAR_SDR_BUFFER_SIZE SZ_64K 126 127/* Internal buffer status flags */ 128#define RCAR_DRIF_BUF_DONE BIT(0) /* DMA completed */ 129#define RCAR_DRIF_BUF_OVERFLOW BIT(1) /* Overflow detected */ 130 131#define to_rcar_drif_buf_pair(sdr, ch_num, idx) \ 132 (&((sdr)->ch[!(ch_num)]->buf[(idx)])) 133 134#define for_each_rcar_drif_channel(ch, ch_mask) \ 135 for_each_set_bit(ch, ch_mask, RCAR_DRIF_MAX_CHANNEL) 136 137/* Debug */ 138#define rdrif_dbg(sdr, fmt, arg...) \ 139 dev_dbg(sdr->v4l2_dev.dev, fmt, ## arg) 140 141#define rdrif_err(sdr, fmt, arg...) \ 142 dev_err(sdr->v4l2_dev.dev, fmt, ## arg) 143 144/* Stream formats */ 145struct rcar_drif_format { 146 u32 pixelformat; 147 u32 buffersize; 148 u32 bitlen; 149 u32 wdcnt; 150 u32 num_ch; 151}; 152 153/* Format descriptions for capture */ 154static const struct rcar_drif_format formats[] = { 155 { 156 .pixelformat = V4L2_SDR_FMT_PCU16BE, 157 .buffersize = RCAR_SDR_BUFFER_SIZE, 158 .bitlen = 16, 159 .wdcnt = 1, 160 .num_ch = 2, 161 }, 162 { 163 .pixelformat = V4L2_SDR_FMT_PCU18BE, 164 .buffersize = RCAR_SDR_BUFFER_SIZE, 165 .bitlen = 18, 166 .wdcnt = 1, 167 .num_ch = 2, 168 }, 169 { 170 .pixelformat = V4L2_SDR_FMT_PCU20BE, 171 .buffersize = RCAR_SDR_BUFFER_SIZE, 172 .bitlen = 20, 173 .wdcnt = 1, 174 .num_ch = 2, 175 }, 176}; 177 178/* Buffer for a received frame from one or both internal channels */ 179struct rcar_drif_frame_buf { 180 /* Common v4l buffer stuff -- must be first */ 181 struct vb2_v4l2_buffer vb; 182 struct list_head list; 183}; 184 185/* OF graph endpoint's V4L2 async data */ 186struct rcar_drif_graph_ep { 187 struct v4l2_subdev *subdev; /* Async matched subdev */ 188 struct v4l2_async_subdev asd; /* Async sub-device descriptor */ 189}; 190 191/* DMA buffer */ 192struct rcar_drif_hwbuf { 193 void *addr; /* CPU-side address */ 194 unsigned int status; /* Buffer status flags */ 195}; 196 197/* Internal channel */ 198struct rcar_drif { 199 struct rcar_drif_sdr *sdr; /* Group device */ 200 struct platform_device *pdev; /* Channel's pdev */ 201 void __iomem *base; /* Base register address */ 202 resource_size_t start; /* I/O resource offset */ 203 struct dma_chan *dmach; /* Reserved DMA channel */ 204 struct clk *clk; /* Module clock */ 205 struct rcar_drif_hwbuf buf[RCAR_DRIF_NUM_HWBUFS]; /* H/W bufs */ 206 dma_addr_t dma_handle; /* Handle for all bufs */ 207 unsigned int num; /* Channel number */ 208 bool acting_sdr; /* Channel acting as SDR device */ 209}; 210 211/* DRIF V4L2 SDR */ 212struct rcar_drif_sdr { 213 struct device *dev; /* Platform device */ 214 struct video_device *vdev; /* V4L2 SDR device */ 215 struct v4l2_device v4l2_dev; /* V4L2 device */ 216 217 /* Videobuf2 queue and queued buffers list */ 218 struct vb2_queue vb_queue; 219 struct list_head queued_bufs; 220 spinlock_t queued_bufs_lock; /* Protects queued_bufs */ 221 spinlock_t dma_lock; /* To serialize DMA cb of channels */ 222 223 struct mutex v4l2_mutex; /* To serialize ioctls */ 224 struct mutex vb_queue_mutex; /* To serialize streaming ioctls */ 225 struct v4l2_ctrl_handler ctrl_hdl; /* SDR control handler */ 226 struct v4l2_async_notifier notifier; /* For subdev (tuner) */ 227 struct rcar_drif_graph_ep ep; /* Endpoint V4L2 async data */ 228 229 /* Current V4L2 SDR format ptr */ 230 const struct rcar_drif_format *fmt; 231 232 /* Device tree SYNC properties */ 233 u32 mdr1; 234 235 /* Internals */ 236 struct rcar_drif *ch[RCAR_DRIF_MAX_CHANNEL]; /* DRIFx0,1 */ 237 unsigned long hw_ch_mask; /* Enabled channels per DT */ 238 unsigned long cur_ch_mask; /* Used channels for an SDR FMT */ 239 u32 num_hw_ch; /* Num of DT enabled channels */ 240 u32 num_cur_ch; /* Num of used channels */ 241 u32 hwbuf_size; /* Each DMA buffer size */ 242 u32 produced; /* Buffers produced by sdr dev */ 243}; 244 245/* Register access functions */ 246static void rcar_drif_write(struct rcar_drif *ch, u32 offset, u32 data) 247{ 248 writel(data, ch->base + offset); 249} 250 251static u32 rcar_drif_read(struct rcar_drif *ch, u32 offset) 252{ 253 return readl(ch->base + offset); 254} 255 256/* Release DMA channels */ 257static void rcar_drif_release_dmachannels(struct rcar_drif_sdr *sdr) 258{ 259 unsigned int i; 260 261 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) 262 if (sdr->ch[i]->dmach) { 263 dma_release_channel(sdr->ch[i]->dmach); 264 sdr->ch[i]->dmach = NULL; 265 } 266} 267 268/* Allocate DMA channels */ 269static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr) 270{ 271 struct dma_slave_config dma_cfg; 272 unsigned int i; 273 int ret; 274 275 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 276 struct rcar_drif *ch = sdr->ch[i]; 277 278 ch->dmach = dma_request_slave_channel(&ch->pdev->dev, "rx"); 279 if (!ch->dmach) { 280 rdrif_err(sdr, "ch%u: dma channel req failed\n", i); 281 ret = -ENODEV; 282 goto dmach_error; 283 } 284 285 /* Configure slave */ 286 memset(&dma_cfg, 0, sizeof(dma_cfg)); 287 dma_cfg.src_addr = (phys_addr_t)(ch->start + RCAR_DRIF_SIRFDR); 288 dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 289 ret = dmaengine_slave_config(ch->dmach, &dma_cfg); 290 if (ret) { 291 rdrif_err(sdr, "ch%u: dma slave config failed\n", i); 292 goto dmach_error; 293 } 294 } 295 return 0; 296 297dmach_error: 298 rcar_drif_release_dmachannels(sdr); 299 return ret; 300} 301 302/* Release queued vb2 buffers */ 303static void rcar_drif_release_queued_bufs(struct rcar_drif_sdr *sdr, 304 enum vb2_buffer_state state) 305{ 306 struct rcar_drif_frame_buf *fbuf, *tmp; 307 unsigned long flags; 308 309 spin_lock_irqsave(&sdr->queued_bufs_lock, flags); 310 list_for_each_entry_safe(fbuf, tmp, &sdr->queued_bufs, list) { 311 list_del(&fbuf->list); 312 vb2_buffer_done(&fbuf->vb.vb2_buf, state); 313 } 314 spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags); 315} 316 317/* Set MDR defaults */ 318static inline void rcar_drif_set_mdr1(struct rcar_drif_sdr *sdr) 319{ 320 unsigned int i; 321 322 /* Set defaults for enabled internal channels */ 323 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 324 /* Refer MSIOF section in manual for this register setting */ 325 rcar_drif_write(sdr->ch[i], RCAR_DRIF_SITMDR1, 326 RCAR_DRIF_SITMDR1_PCON); 327 328 /* Setup MDR1 value */ 329 rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR1, sdr->mdr1); 330 331 rdrif_dbg(sdr, "ch%u: mdr1 = 0x%08x", 332 i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR1)); 333 } 334} 335 336/* Set DRIF receive format */ 337static int rcar_drif_set_format(struct rcar_drif_sdr *sdr) 338{ 339 unsigned int i; 340 341 rdrif_dbg(sdr, "setfmt: bitlen %u wdcnt %u num_ch %u\n", 342 sdr->fmt->bitlen, sdr->fmt->wdcnt, sdr->fmt->num_ch); 343 344 /* Sanity check */ 345 if (sdr->fmt->num_ch > sdr->num_cur_ch) { 346 rdrif_err(sdr, "fmt num_ch %u cur_ch %u mismatch\n", 347 sdr->fmt->num_ch, sdr->num_cur_ch); 348 return -EINVAL; 349 } 350 351 /* Setup group, bitlen & wdcnt */ 352 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 353 u32 mdr; 354 355 /* Two groups */ 356 mdr = RCAR_DRIF_MDR_GRPCNT(2) | 357 RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) | 358 RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt); 359 rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR2, mdr); 360 361 mdr = RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) | 362 RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt); 363 rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR3, mdr); 364 365 rdrif_dbg(sdr, "ch%u: new mdr[2,3] = 0x%08x, 0x%08x\n", 366 i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR2), 367 rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR3)); 368 } 369 return 0; 370} 371 372/* Release DMA buffers */ 373static void rcar_drif_release_buf(struct rcar_drif_sdr *sdr) 374{ 375 unsigned int i; 376 377 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 378 struct rcar_drif *ch = sdr->ch[i]; 379 380 /* First entry contains the dma buf ptr */ 381 if (ch->buf[0].addr) { 382 dma_free_coherent(&ch->pdev->dev, 383 sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS, 384 ch->buf[0].addr, ch->dma_handle); 385 ch->buf[0].addr = NULL; 386 } 387 } 388} 389 390/* Request DMA buffers */ 391static int rcar_drif_request_buf(struct rcar_drif_sdr *sdr) 392{ 393 int ret = -ENOMEM; 394 unsigned int i, j; 395 void *addr; 396 397 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 398 struct rcar_drif *ch = sdr->ch[i]; 399 400 /* Allocate DMA buffers */ 401 addr = dma_alloc_coherent(&ch->pdev->dev, 402 sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS, 403 &ch->dma_handle, GFP_KERNEL); 404 if (!addr) { 405 rdrif_err(sdr, 406 "ch%u: dma alloc failed. num hwbufs %u size %u\n", 407 i, RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size); 408 goto error; 409 } 410 411 /* Split the chunk and populate bufctxt */ 412 for (j = 0; j < RCAR_DRIF_NUM_HWBUFS; j++) { 413 ch->buf[j].addr = addr + (j * sdr->hwbuf_size); 414 ch->buf[j].status = 0; 415 } 416 } 417 return 0; 418error: 419 return ret; 420} 421 422/* Setup vb_queue minimum buffer requirements */ 423static int rcar_drif_queue_setup(struct vb2_queue *vq, 424 unsigned int *num_buffers, unsigned int *num_planes, 425 unsigned int sizes[], struct device *alloc_devs[]) 426{ 427 struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq); 428 429 /* Need at least 16 buffers */ 430 if (vq->num_buffers + *num_buffers < 16) 431 *num_buffers = 16 - vq->num_buffers; 432 433 *num_planes = 1; 434 sizes[0] = PAGE_ALIGN(sdr->fmt->buffersize); 435 rdrif_dbg(sdr, "num_bufs %d sizes[0] %d\n", *num_buffers, sizes[0]); 436 437 return 0; 438} 439 440/* Enqueue buffer */ 441static void rcar_drif_buf_queue(struct vb2_buffer *vb) 442{ 443 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 444 struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vb->vb2_queue); 445 struct rcar_drif_frame_buf *fbuf = 446 container_of(vbuf, struct rcar_drif_frame_buf, vb); 447 unsigned long flags; 448 449 rdrif_dbg(sdr, "buf_queue idx %u\n", vb->index); 450 spin_lock_irqsave(&sdr->queued_bufs_lock, flags); 451 list_add_tail(&fbuf->list, &sdr->queued_bufs); 452 spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags); 453} 454 455/* Get a frame buf from list */ 456static struct rcar_drif_frame_buf * 457rcar_drif_get_fbuf(struct rcar_drif_sdr *sdr) 458{ 459 struct rcar_drif_frame_buf *fbuf; 460 unsigned long flags; 461 462 spin_lock_irqsave(&sdr->queued_bufs_lock, flags); 463 fbuf = list_first_entry_or_null(&sdr->queued_bufs, struct 464 rcar_drif_frame_buf, list); 465 if (!fbuf) { 466 /* 467 * App is late in enqueing buffers. Samples lost & there will 468 * be a gap in sequence number when app recovers 469 */ 470 rdrif_dbg(sdr, "\napp late: prod %u\n", sdr->produced); 471 spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags); 472 return NULL; 473 } 474 list_del(&fbuf->list); 475 spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags); 476 477 return fbuf; 478} 479 480/* Helpers to set/clear buf pair status */ 481static inline bool rcar_drif_bufs_done(struct rcar_drif_hwbuf **buf) 482{ 483 return (buf[0]->status & buf[1]->status & RCAR_DRIF_BUF_DONE); 484} 485 486static inline bool rcar_drif_bufs_overflow(struct rcar_drif_hwbuf **buf) 487{ 488 return ((buf[0]->status | buf[1]->status) & RCAR_DRIF_BUF_OVERFLOW); 489} 490 491static inline void rcar_drif_bufs_clear(struct rcar_drif_hwbuf **buf, 492 unsigned int bit) 493{ 494 unsigned int i; 495 496 for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++) 497 buf[i]->status &= ~bit; 498} 499 500/* Channel DMA complete */ 501static void rcar_drif_channel_complete(struct rcar_drif *ch, u32 idx) 502{ 503 u32 str; 504 505 ch->buf[idx].status |= RCAR_DRIF_BUF_DONE; 506 507 /* Check for DRIF errors */ 508 str = rcar_drif_read(ch, RCAR_DRIF_SISTR); 509 if (unlikely(str & RCAR_DRIF_RFOVF)) { 510 /* Writing the same clears it */ 511 rcar_drif_write(ch, RCAR_DRIF_SISTR, str); 512 513 /* Overflow: some samples are lost */ 514 ch->buf[idx].status |= RCAR_DRIF_BUF_OVERFLOW; 515 } 516} 517 518/* DMA callback for each stage */ 519static void rcar_drif_dma_complete(void *dma_async_param) 520{ 521 struct rcar_drif *ch = dma_async_param; 522 struct rcar_drif_sdr *sdr = ch->sdr; 523 struct rcar_drif_hwbuf *buf[RCAR_DRIF_MAX_CHANNEL]; 524 struct rcar_drif_frame_buf *fbuf; 525 bool overflow = false; 526 u32 idx, produced; 527 unsigned int i; 528 529 spin_lock(&sdr->dma_lock); 530 531 /* DMA can be terminated while the callback was waiting on lock */ 532 if (!vb2_is_streaming(&sdr->vb_queue)) { 533 spin_unlock(&sdr->dma_lock); 534 return; 535 } 536 537 idx = sdr->produced % RCAR_DRIF_NUM_HWBUFS; 538 rcar_drif_channel_complete(ch, idx); 539 540 if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL) { 541 buf[0] = ch->num ? to_rcar_drif_buf_pair(sdr, ch->num, idx) : 542 &ch->buf[idx]; 543 buf[1] = ch->num ? &ch->buf[idx] : 544 to_rcar_drif_buf_pair(sdr, ch->num, idx); 545 546 /* Check if both DMA buffers are done */ 547 if (!rcar_drif_bufs_done(buf)) { 548 spin_unlock(&sdr->dma_lock); 549 return; 550 } 551 552 /* Clear buf done status */ 553 rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_DONE); 554 555 if (rcar_drif_bufs_overflow(buf)) { 556 overflow = true; 557 /* Clear the flag in status */ 558 rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_OVERFLOW); 559 } 560 } else { 561 buf[0] = &ch->buf[idx]; 562 if (buf[0]->status & RCAR_DRIF_BUF_OVERFLOW) { 563 overflow = true; 564 /* Clear the flag in status */ 565 buf[0]->status &= ~RCAR_DRIF_BUF_OVERFLOW; 566 } 567 } 568 569 /* Buffer produced for consumption */ 570 produced = sdr->produced++; 571 spin_unlock(&sdr->dma_lock); 572 573 rdrif_dbg(sdr, "ch%u: prod %u\n", ch->num, produced); 574 575 /* Get fbuf */ 576 fbuf = rcar_drif_get_fbuf(sdr); 577 if (!fbuf) 578 return; 579 580 for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++) 581 memcpy(vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0) + 582 i * sdr->hwbuf_size, buf[i]->addr, sdr->hwbuf_size); 583 584 fbuf->vb.field = V4L2_FIELD_NONE; 585 fbuf->vb.sequence = produced; 586 fbuf->vb.vb2_buf.timestamp = ktime_get_ns(); 587 vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, sdr->fmt->buffersize); 588 589 /* Set error state on overflow */ 590 vb2_buffer_done(&fbuf->vb.vb2_buf, 591 overflow ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 592} 593 594static int rcar_drif_qbuf(struct rcar_drif *ch) 595{ 596 struct rcar_drif_sdr *sdr = ch->sdr; 597 dma_addr_t addr = ch->dma_handle; 598 struct dma_async_tx_descriptor *rxd; 599 dma_cookie_t cookie; 600 int ret = -EIO; 601 602 /* Setup cyclic DMA with given buffers */ 603 rxd = dmaengine_prep_dma_cyclic(ch->dmach, addr, 604 sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS, 605 sdr->hwbuf_size, DMA_DEV_TO_MEM, 606 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 607 if (!rxd) { 608 rdrif_err(sdr, "ch%u: prep dma cyclic failed\n", ch->num); 609 return ret; 610 } 611 612 /* Submit descriptor */ 613 rxd->callback = rcar_drif_dma_complete; 614 rxd->callback_param = ch; 615 cookie = dmaengine_submit(rxd); 616 if (dma_submit_error(cookie)) { 617 rdrif_err(sdr, "ch%u: dma submit failed\n", ch->num); 618 return ret; 619 } 620 621 dma_async_issue_pending(ch->dmach); 622 return 0; 623} 624 625/* Enable reception */ 626static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr) 627{ 628 unsigned int i; 629 u32 ctr; 630 int ret = -EINVAL; 631 632 /* 633 * When both internal channels are enabled, they can be synchronized 634 * only by the master 635 */ 636 637 /* Enable receive */ 638 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 639 ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR); 640 ctr |= (RCAR_DRIF_SICTR_RX_RISING_EDGE | 641 RCAR_DRIF_SICTR_RX_EN); 642 rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr); 643 } 644 645 /* Check receive enabled */ 646 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 647 ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR, 648 ctr, ctr & RCAR_DRIF_SICTR_RX_EN, 7, 100000); 649 if (ret) { 650 rdrif_err(sdr, "ch%u: rx en failed. ctr 0x%08x\n", i, 651 rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR)); 652 break; 653 } 654 } 655 return ret; 656} 657 658/* Disable reception */ 659static void rcar_drif_disable_rx(struct rcar_drif_sdr *sdr) 660{ 661 unsigned int i; 662 u32 ctr; 663 int ret; 664 665 /* Disable receive */ 666 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 667 ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR); 668 ctr &= ~RCAR_DRIF_SICTR_RX_EN; 669 rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr); 670 } 671 672 /* Check receive disabled */ 673 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 674 ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR, 675 ctr, !(ctr & RCAR_DRIF_SICTR_RX_EN), 7, 100000); 676 if (ret) 677 dev_warn(&sdr->vdev->dev, 678 "ch%u: failed to disable rx. ctr 0x%08x\n", 679 i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR)); 680 } 681} 682 683/* Stop channel */ 684static void rcar_drif_stop_channel(struct rcar_drif *ch) 685{ 686 /* Disable DMA receive interrupt */ 687 rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00000000); 688 689 /* Terminate all DMA transfers */ 690 dmaengine_terminate_sync(ch->dmach); 691} 692 693/* Stop receive operation */ 694static void rcar_drif_stop(struct rcar_drif_sdr *sdr) 695{ 696 unsigned int i; 697 698 /* Disable Rx */ 699 rcar_drif_disable_rx(sdr); 700 701 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) 702 rcar_drif_stop_channel(sdr->ch[i]); 703} 704 705/* Start channel */ 706static int rcar_drif_start_channel(struct rcar_drif *ch) 707{ 708 struct rcar_drif_sdr *sdr = ch->sdr; 709 u32 ctr, str; 710 int ret; 711 712 /* Reset receive */ 713 rcar_drif_write(ch, RCAR_DRIF_SICTR, RCAR_DRIF_SICTR_RESET); 714 ret = readl_poll_timeout(ch->base + RCAR_DRIF_SICTR, ctr, 715 !(ctr & RCAR_DRIF_SICTR_RESET), 7, 100000); 716 if (ret) { 717 rdrif_err(sdr, "ch%u: failed to reset rx. ctr 0x%08x\n", 718 ch->num, rcar_drif_read(ch, RCAR_DRIF_SICTR)); 719 return ret; 720 } 721 722 /* Queue buffers for DMA */ 723 ret = rcar_drif_qbuf(ch); 724 if (ret) 725 return ret; 726 727 /* Clear status register flags */ 728 str = RCAR_DRIF_RFFUL | RCAR_DRIF_REOF | RCAR_DRIF_RFSERR | 729 RCAR_DRIF_RFUDF | RCAR_DRIF_RFOVF; 730 rcar_drif_write(ch, RCAR_DRIF_SISTR, str); 731 732 /* Enable DMA receive interrupt */ 733 rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00009000); 734 735 return ret; 736} 737 738/* Start receive operation */ 739static int rcar_drif_start(struct rcar_drif_sdr *sdr) 740{ 741 unsigned long enabled = 0; 742 unsigned int i; 743 int ret; 744 745 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 746 ret = rcar_drif_start_channel(sdr->ch[i]); 747 if (ret) 748 goto start_error; 749 enabled |= BIT(i); 750 } 751 752 ret = rcar_drif_enable_rx(sdr); 753 if (ret) 754 goto enable_error; 755 756 sdr->produced = 0; 757 return ret; 758 759enable_error: 760 rcar_drif_disable_rx(sdr); 761start_error: 762 for_each_rcar_drif_channel(i, &enabled) 763 rcar_drif_stop_channel(sdr->ch[i]); 764 765 return ret; 766} 767 768/* Start streaming */ 769static int rcar_drif_start_streaming(struct vb2_queue *vq, unsigned int count) 770{ 771 struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq); 772 unsigned long enabled = 0; 773 unsigned int i; 774 int ret; 775 776 mutex_lock(&sdr->v4l2_mutex); 777 778 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { 779 ret = clk_prepare_enable(sdr->ch[i]->clk); 780 if (ret) 781 goto error; 782 enabled |= BIT(i); 783 } 784 785 /* Set default MDRx settings */ 786 rcar_drif_set_mdr1(sdr); 787 788 /* Set new format */ 789 ret = rcar_drif_set_format(sdr); 790 if (ret) 791 goto error; 792 793 if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL) 794 sdr->hwbuf_size = sdr->fmt->buffersize / RCAR_DRIF_MAX_CHANNEL; 795 else 796 sdr->hwbuf_size = sdr->fmt->buffersize; 797 798 rdrif_dbg(sdr, "num hwbufs %u, hwbuf_size %u\n", 799 RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size); 800 801 /* Alloc DMA channel */ 802 ret = rcar_drif_alloc_dmachannels(sdr); 803 if (ret) 804 goto error; 805 806 /* Request buffers */ 807 ret = rcar_drif_request_buf(sdr); 808 if (ret) 809 goto error; 810 811 /* Start Rx */ 812 ret = rcar_drif_start(sdr); 813 if (ret) 814 goto error; 815 816 mutex_unlock(&sdr->v4l2_mutex); 817 818 return ret; 819 820error: 821 rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_QUEUED); 822 rcar_drif_release_buf(sdr); 823 rcar_drif_release_dmachannels(sdr); 824 for_each_rcar_drif_channel(i, &enabled) 825 clk_disable_unprepare(sdr->ch[i]->clk); 826 827 mutex_unlock(&sdr->v4l2_mutex); 828 829 return ret; 830} 831 832/* Stop streaming */ 833static void rcar_drif_stop_streaming(struct vb2_queue *vq) 834{ 835 struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq); 836 unsigned int i; 837 838 mutex_lock(&sdr->v4l2_mutex); 839 840 /* Stop hardware streaming */ 841 rcar_drif_stop(sdr); 842 843 /* Return all queued buffers to vb2 */ 844 rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_ERROR); 845 846 /* Release buf */ 847 rcar_drif_release_buf(sdr); 848 849 /* Release DMA channel resources */ 850 rcar_drif_release_dmachannels(sdr); 851 852 for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) 853 clk_disable_unprepare(sdr->ch[i]->clk); 854 855 mutex_unlock(&sdr->v4l2_mutex); 856} 857 858/* Vb2 ops */ 859static const struct vb2_ops rcar_drif_vb2_ops = { 860 .queue_setup = rcar_drif_queue_setup, 861 .buf_queue = rcar_drif_buf_queue, 862 .start_streaming = rcar_drif_start_streaming, 863 .stop_streaming = rcar_drif_stop_streaming, 864 .wait_prepare = vb2_ops_wait_prepare, 865 .wait_finish = vb2_ops_wait_finish, 866}; 867 868static int rcar_drif_querycap(struct file *file, void *fh, 869 struct v4l2_capability *cap) 870{ 871 struct rcar_drif_sdr *sdr = video_drvdata(file); 872 873 strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); 874 strscpy(cap->card, sdr->vdev->name, sizeof(cap->card)); 875 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", 876 sdr->vdev->name); 877 878 return 0; 879} 880 881static int rcar_drif_set_default_format(struct rcar_drif_sdr *sdr) 882{ 883 unsigned int i; 884 885 for (i = 0; i < ARRAY_SIZE(formats); i++) { 886 /* Matching fmt based on required channels is set as default */ 887 if (sdr->num_hw_ch == formats[i].num_ch) { 888 sdr->fmt = &formats[i]; 889 sdr->cur_ch_mask = sdr->hw_ch_mask; 890 sdr->num_cur_ch = sdr->num_hw_ch; 891 dev_dbg(sdr->dev, "default fmt[%u]: mask %lu num %u\n", 892 i, sdr->cur_ch_mask, sdr->num_cur_ch); 893 return 0; 894 } 895 } 896 return -EINVAL; 897} 898 899static int rcar_drif_enum_fmt_sdr_cap(struct file *file, void *priv, 900 struct v4l2_fmtdesc *f) 901{ 902 if (f->index >= ARRAY_SIZE(formats)) 903 return -EINVAL; 904 905 f->pixelformat = formats[f->index].pixelformat; 906 907 return 0; 908} 909 910static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv, 911 struct v4l2_format *f) 912{ 913 struct rcar_drif_sdr *sdr = video_drvdata(file); 914 915 f->fmt.sdr.pixelformat = sdr->fmt->pixelformat; 916 f->fmt.sdr.buffersize = sdr->fmt->buffersize; 917 918 return 0; 919} 920 921static int rcar_drif_s_fmt_sdr_cap(struct file *file, void *priv, 922 struct v4l2_format *f) 923{ 924 struct rcar_drif_sdr *sdr = video_drvdata(file); 925 struct vb2_queue *q = &sdr->vb_queue; 926 unsigned int i; 927 928 if (vb2_is_busy(q)) 929 return -EBUSY; 930 931 for (i = 0; i < ARRAY_SIZE(formats); i++) { 932 if (formats[i].pixelformat == f->fmt.sdr.pixelformat) 933 break; 934 } 935 936 if (i == ARRAY_SIZE(formats)) 937 i = 0; /* Set the 1st format as default on no match */ 938 939 sdr->fmt = &formats[i]; 940 f->fmt.sdr.pixelformat = sdr->fmt->pixelformat; 941 f->fmt.sdr.buffersize = formats[i].buffersize; 942 memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved)); 943 944 /* 945 * If a format demands one channel only out of two 946 * enabled channels, pick the 0th channel. 947 */ 948 if (formats[i].num_ch < sdr->num_hw_ch) { 949 sdr->cur_ch_mask = BIT(0); 950 sdr->num_cur_ch = formats[i].num_ch; 951 } else { 952 sdr->cur_ch_mask = sdr->hw_ch_mask; 953 sdr->num_cur_ch = sdr->num_hw_ch; 954 } 955 956 rdrif_dbg(sdr, "cur: idx %u mask %lu num %u\n", 957 i, sdr->cur_ch_mask, sdr->num_cur_ch); 958 959 return 0; 960} 961 962static int rcar_drif_try_fmt_sdr_cap(struct file *file, void *priv, 963 struct v4l2_format *f) 964{ 965 unsigned int i; 966 967 for (i = 0; i < ARRAY_SIZE(formats); i++) { 968 if (formats[i].pixelformat == f->fmt.sdr.pixelformat) { 969 f->fmt.sdr.buffersize = formats[i].buffersize; 970 return 0; 971 } 972 } 973 974 f->fmt.sdr.pixelformat = formats[0].pixelformat; 975 f->fmt.sdr.buffersize = formats[0].buffersize; 976 memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved)); 977 978 return 0; 979} 980 981/* Tuner subdev ioctls */ 982static int rcar_drif_enum_freq_bands(struct file *file, void *priv, 983 struct v4l2_frequency_band *band) 984{ 985 struct rcar_drif_sdr *sdr = video_drvdata(file); 986 987 return v4l2_subdev_call(sdr->ep.subdev, tuner, enum_freq_bands, band); 988} 989 990static int rcar_drif_g_frequency(struct file *file, void *priv, 991 struct v4l2_frequency *f) 992{ 993 struct rcar_drif_sdr *sdr = video_drvdata(file); 994 995 return v4l2_subdev_call(sdr->ep.subdev, tuner, g_frequency, f); 996} 997 998static int rcar_drif_s_frequency(struct file *file, void *priv, 999 const struct v4l2_frequency *f) 1000{ 1001 struct rcar_drif_sdr *sdr = video_drvdata(file); 1002 1003 return v4l2_subdev_call(sdr->ep.subdev, tuner, s_frequency, f); 1004} 1005 1006static int rcar_drif_g_tuner(struct file *file, void *priv, 1007 struct v4l2_tuner *vt) 1008{ 1009 struct rcar_drif_sdr *sdr = video_drvdata(file); 1010 1011 return v4l2_subdev_call(sdr->ep.subdev, tuner, g_tuner, vt); 1012} 1013 1014static int rcar_drif_s_tuner(struct file *file, void *priv, 1015 const struct v4l2_tuner *vt) 1016{ 1017 struct rcar_drif_sdr *sdr = video_drvdata(file); 1018 1019 return v4l2_subdev_call(sdr->ep.subdev, tuner, s_tuner, vt); 1020} 1021 1022static const struct v4l2_ioctl_ops rcar_drif_ioctl_ops = { 1023 .vidioc_querycap = rcar_drif_querycap, 1024 1025 .vidioc_enum_fmt_sdr_cap = rcar_drif_enum_fmt_sdr_cap, 1026 .vidioc_g_fmt_sdr_cap = rcar_drif_g_fmt_sdr_cap, 1027 .vidioc_s_fmt_sdr_cap = rcar_drif_s_fmt_sdr_cap, 1028 .vidioc_try_fmt_sdr_cap = rcar_drif_try_fmt_sdr_cap, 1029 1030 .vidioc_reqbufs = vb2_ioctl_reqbufs, 1031 .vidioc_create_bufs = vb2_ioctl_create_bufs, 1032 .vidioc_prepare_buf = vb2_ioctl_prepare_buf, 1033 .vidioc_querybuf = vb2_ioctl_querybuf, 1034 .vidioc_qbuf = vb2_ioctl_qbuf, 1035 .vidioc_dqbuf = vb2_ioctl_dqbuf, 1036 1037 .vidioc_streamon = vb2_ioctl_streamon, 1038 .vidioc_streamoff = vb2_ioctl_streamoff, 1039 1040 .vidioc_s_frequency = rcar_drif_s_frequency, 1041 .vidioc_g_frequency = rcar_drif_g_frequency, 1042 .vidioc_s_tuner = rcar_drif_s_tuner, 1043 .vidioc_g_tuner = rcar_drif_g_tuner, 1044 .vidioc_enum_freq_bands = rcar_drif_enum_freq_bands, 1045 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, 1046 .vidioc_unsubscribe_event = v4l2_event_unsubscribe, 1047 .vidioc_log_status = v4l2_ctrl_log_status, 1048}; 1049 1050static const struct v4l2_file_operations rcar_drif_fops = { 1051 .owner = THIS_MODULE, 1052 .open = v4l2_fh_open, 1053 .release = vb2_fop_release, 1054 .read = vb2_fop_read, 1055 .poll = vb2_fop_poll, 1056 .mmap = vb2_fop_mmap, 1057 .unlocked_ioctl = video_ioctl2, 1058}; 1059 1060static int rcar_drif_sdr_register(struct rcar_drif_sdr *sdr) 1061{ 1062 int ret; 1063 1064 /* Init video_device structure */ 1065 sdr->vdev = video_device_alloc(); 1066 if (!sdr->vdev) 1067 return -ENOMEM; 1068 1069 snprintf(sdr->vdev->name, sizeof(sdr->vdev->name), "R-Car DRIF"); 1070 sdr->vdev->fops = &rcar_drif_fops; 1071 sdr->vdev->ioctl_ops = &rcar_drif_ioctl_ops; 1072 sdr->vdev->release = video_device_release; 1073 sdr->vdev->lock = &sdr->v4l2_mutex; 1074 sdr->vdev->queue = &sdr->vb_queue; 1075 sdr->vdev->queue->lock = &sdr->vb_queue_mutex; 1076 sdr->vdev->ctrl_handler = &sdr->ctrl_hdl; 1077 sdr->vdev->v4l2_dev = &sdr->v4l2_dev; 1078 sdr->vdev->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER | 1079 V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; 1080 video_set_drvdata(sdr->vdev, sdr); 1081 1082 /* Register V4L2 SDR device */ 1083 ret = video_register_device(sdr->vdev, VFL_TYPE_SDR, -1); 1084 if (ret) { 1085 video_device_release(sdr->vdev); 1086 sdr->vdev = NULL; 1087 dev_err(sdr->dev, "failed video_register_device (%d)\n", ret); 1088 } 1089 1090 return ret; 1091} 1092 1093static void rcar_drif_sdr_unregister(struct rcar_drif_sdr *sdr) 1094{ 1095 video_unregister_device(sdr->vdev); 1096 sdr->vdev = NULL; 1097} 1098 1099/* Sub-device bound callback */ 1100static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier, 1101 struct v4l2_subdev *subdev, 1102 struct v4l2_async_subdev *asd) 1103{ 1104 struct rcar_drif_sdr *sdr = 1105 container_of(notifier, struct rcar_drif_sdr, notifier); 1106 1107 if (sdr->ep.asd.match.fwnode != 1108 of_fwnode_handle(subdev->dev->of_node)) { 1109 rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name); 1110 return -EINVAL; 1111 } 1112 1113 v4l2_set_subdev_hostdata(subdev, sdr); 1114 sdr->ep.subdev = subdev; 1115 rdrif_dbg(sdr, "bound asd %s\n", subdev->name); 1116 1117 return 0; 1118} 1119 1120/* Sub-device unbind callback */ 1121static void rcar_drif_notify_unbind(struct v4l2_async_notifier *notifier, 1122 struct v4l2_subdev *subdev, 1123 struct v4l2_async_subdev *asd) 1124{ 1125 struct rcar_drif_sdr *sdr = 1126 container_of(notifier, struct rcar_drif_sdr, notifier); 1127 1128 if (sdr->ep.subdev != subdev) { 1129 rdrif_err(sdr, "subdev %s is not bound\n", subdev->name); 1130 return; 1131 } 1132 1133 /* Free ctrl handler if initialized */ 1134 v4l2_ctrl_handler_free(&sdr->ctrl_hdl); 1135 sdr->v4l2_dev.ctrl_handler = NULL; 1136 sdr->ep.subdev = NULL; 1137 1138 rcar_drif_sdr_unregister(sdr); 1139 rdrif_dbg(sdr, "unbind asd %s\n", subdev->name); 1140} 1141 1142/* Sub-device registered notification callback */ 1143static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier) 1144{ 1145 struct rcar_drif_sdr *sdr = 1146 container_of(notifier, struct rcar_drif_sdr, notifier); 1147 int ret; 1148 1149 /* 1150 * The subdev tested at this point uses 4 controls. Using 10 as a worst 1151 * case scenario hint. When less controls are needed there will be some 1152 * unused memory and when more controls are needed the framework uses 1153 * hash to manage controls within this number. 1154 */ 1155 ret = v4l2_ctrl_handler_init(&sdr->ctrl_hdl, 10); 1156 if (ret) 1157 return -ENOMEM; 1158 1159 sdr->v4l2_dev.ctrl_handler = &sdr->ctrl_hdl; 1160 ret = v4l2_device_register_subdev_nodes(&sdr->v4l2_dev); 1161 if (ret) { 1162 rdrif_err(sdr, "failed: register subdev nodes ret %d\n", ret); 1163 goto error; 1164 } 1165 1166 ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl, 1167 sdr->ep.subdev->ctrl_handler, NULL, true); 1168 if (ret) { 1169 rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret); 1170 goto error; 1171 } 1172 1173 ret = rcar_drif_sdr_register(sdr); 1174 if (ret) 1175 goto error; 1176 1177 return ret; 1178 1179error: 1180 v4l2_ctrl_handler_free(&sdr->ctrl_hdl); 1181 1182 return ret; 1183} 1184 1185static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = { 1186 .bound = rcar_drif_notify_bound, 1187 .unbind = rcar_drif_notify_unbind, 1188 .complete = rcar_drif_notify_complete, 1189}; 1190 1191/* Read endpoint properties */ 1192static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr, 1193 struct fwnode_handle *fwnode) 1194{ 1195 u32 val; 1196 1197 /* Set the I2S defaults for SIRMDR1*/ 1198 sdr->mdr1 = RCAR_DRIF_SIRMDR1_SYNCMD_LR | RCAR_DRIF_SIRMDR1_MSB_FIRST | 1199 RCAR_DRIF_SIRMDR1_DTDL_1 | RCAR_DRIF_SIRMDR1_SYNCDL_0; 1200 1201 /* Parse sync polarity from endpoint */ 1202 if (!fwnode_property_read_u32(fwnode, "sync-active", &val)) 1203 sdr->mdr1 |= val ? RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH : 1204 RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW; 1205 else 1206 sdr->mdr1 |= RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH; /* default */ 1207 1208 dev_dbg(sdr->dev, "mdr1 0x%08x\n", sdr->mdr1); 1209} 1210 1211/* Parse sub-devs (tuner) to find a matching device */ 1212static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr) 1213{ 1214 struct v4l2_async_notifier *notifier = &sdr->notifier; 1215 struct fwnode_handle *fwnode, *ep; 1216 int ret; 1217 1218 v4l2_async_notifier_init(notifier); 1219 1220 ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node), 1221 NULL); 1222 if (!ep) 1223 return 0; 1224 1225 fwnode = fwnode_graph_get_remote_port_parent(ep); 1226 if (!fwnode) { 1227 dev_warn(sdr->dev, "bad remote port parent\n"); 1228 fwnode_handle_put(ep); 1229 return -EINVAL; 1230 } 1231 1232 sdr->ep.asd.match.fwnode = fwnode; 1233 sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE; 1234 ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd); 1235 if (ret) { 1236 fwnode_handle_put(fwnode); 1237 return ret; 1238 } 1239 1240 /* Get the endpoint properties */ 1241 rcar_drif_get_ep_properties(sdr, ep); 1242 1243 fwnode_handle_put(fwnode); 1244 fwnode_handle_put(ep); 1245 1246 return 0; 1247} 1248 1249/* Check if the given device is the primary bond */ 1250static bool rcar_drif_primary_bond(struct platform_device *pdev) 1251{ 1252 return of_property_read_bool(pdev->dev.of_node, "renesas,primary-bond"); 1253} 1254 1255/* Check if both devices of the bond are enabled */ 1256static struct device_node *rcar_drif_bond_enabled(struct platform_device *p) 1257{ 1258 struct device_node *np; 1259 1260 np = of_parse_phandle(p->dev.of_node, "renesas,bonding", 0); 1261 if (np && of_device_is_available(np)) 1262 return np; 1263 1264 return NULL; 1265} 1266 1267/* Check if the bonded device is probed */ 1268static int rcar_drif_bond_available(struct rcar_drif_sdr *sdr, 1269 struct device_node *np) 1270{ 1271 struct platform_device *pdev; 1272 struct rcar_drif *ch; 1273 int ret = 0; 1274 1275 pdev = of_find_device_by_node(np); 1276 if (!pdev) { 1277 dev_err(sdr->dev, "failed to get bonded device from node\n"); 1278 return -ENODEV; 1279 } 1280 1281 device_lock(&pdev->dev); 1282 ch = platform_get_drvdata(pdev); 1283 if (ch) { 1284 /* Update sdr data in the bonded device */ 1285 ch->sdr = sdr; 1286 1287 /* Update sdr with bonded device data */ 1288 sdr->ch[ch->num] = ch; 1289 sdr->hw_ch_mask |= BIT(ch->num); 1290 } else { 1291 /* Defer */ 1292 dev_info(sdr->dev, "defer probe\n"); 1293 ret = -EPROBE_DEFER; 1294 } 1295 device_unlock(&pdev->dev); 1296 1297 put_device(&pdev->dev); 1298 1299 return ret; 1300} 1301 1302/* V4L2 SDR device probe */ 1303static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr) 1304{ 1305 int ret; 1306 1307 /* Validate any supported format for enabled channels */ 1308 ret = rcar_drif_set_default_format(sdr); 1309 if (ret) { 1310 dev_err(sdr->dev, "failed to set default format\n"); 1311 return ret; 1312 } 1313 1314 /* Set defaults */ 1315 sdr->hwbuf_size = RCAR_DRIF_DEFAULT_HWBUF_SIZE; 1316 1317 mutex_init(&sdr->v4l2_mutex); 1318 mutex_init(&sdr->vb_queue_mutex); 1319 spin_lock_init(&sdr->queued_bufs_lock); 1320 spin_lock_init(&sdr->dma_lock); 1321 INIT_LIST_HEAD(&sdr->queued_bufs); 1322 1323 /* Init videobuf2 queue structure */ 1324 sdr->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE; 1325 sdr->vb_queue.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF; 1326 sdr->vb_queue.drv_priv = sdr; 1327 sdr->vb_queue.buf_struct_size = sizeof(struct rcar_drif_frame_buf); 1328 sdr->vb_queue.ops = &rcar_drif_vb2_ops; 1329 sdr->vb_queue.mem_ops = &vb2_vmalloc_memops; 1330 sdr->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1331 1332 /* Init videobuf2 queue */ 1333 ret = vb2_queue_init(&sdr->vb_queue); 1334 if (ret) { 1335 dev_err(sdr->dev, "failed: vb2_queue_init ret %d\n", ret); 1336 return ret; 1337 } 1338 1339 /* Register the v4l2_device */ 1340 ret = v4l2_device_register(sdr->dev, &sdr->v4l2_dev); 1341 if (ret) { 1342 dev_err(sdr->dev, "failed: v4l2_device_register ret %d\n", ret); 1343 return ret; 1344 } 1345 1346 /* 1347 * Parse subdevs after v4l2_device_register because if the subdev 1348 * is already probed, bound and complete will be called immediately 1349 */ 1350 ret = rcar_drif_parse_subdevs(sdr); 1351 if (ret) 1352 goto error; 1353 1354 sdr->notifier.ops = &rcar_drif_notify_ops; 1355 1356 /* Register notifier */ 1357 ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier); 1358 if (ret < 0) { 1359 dev_err(sdr->dev, "failed: notifier register ret %d\n", ret); 1360 goto cleanup; 1361 } 1362 1363 return ret; 1364 1365cleanup: 1366 v4l2_async_notifier_cleanup(&sdr->notifier); 1367error: 1368 v4l2_device_unregister(&sdr->v4l2_dev); 1369 1370 return ret; 1371} 1372 1373/* V4L2 SDR device remove */ 1374static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr) 1375{ 1376 v4l2_async_notifier_unregister(&sdr->notifier); 1377 v4l2_async_notifier_cleanup(&sdr->notifier); 1378 v4l2_device_unregister(&sdr->v4l2_dev); 1379} 1380 1381/* DRIF channel probe */ 1382static int rcar_drif_probe(struct platform_device *pdev) 1383{ 1384 struct rcar_drif_sdr *sdr; 1385 struct device_node *np; 1386 struct rcar_drif *ch; 1387 struct resource *res; 1388 int ret; 1389 1390 /* Reserve memory for enabled channel */ 1391 ch = devm_kzalloc(&pdev->dev, sizeof(*ch), GFP_KERNEL); 1392 if (!ch) 1393 return -ENOMEM; 1394 1395 ch->pdev = pdev; 1396 1397 /* Module clock */ 1398 ch->clk = devm_clk_get(&pdev->dev, "fck"); 1399 if (IS_ERR(ch->clk)) { 1400 ret = PTR_ERR(ch->clk); 1401 dev_err(&pdev->dev, "clk get failed (%d)\n", ret); 1402 return ret; 1403 } 1404 1405 /* Register map */ 1406 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1407 ch->base = devm_ioremap_resource(&pdev->dev, res); 1408 if (IS_ERR(ch->base)) 1409 return PTR_ERR(ch->base); 1410 1411 ch->start = res->start; 1412 platform_set_drvdata(pdev, ch); 1413 1414 /* Check if both channels of the bond are enabled */ 1415 np = rcar_drif_bond_enabled(pdev); 1416 if (np) { 1417 /* Check if current channel acting as primary-bond */ 1418 if (!rcar_drif_primary_bond(pdev)) { 1419 ch->num = 1; /* Primary bond is channel 0 always */ 1420 of_node_put(np); 1421 return 0; 1422 } 1423 } 1424 1425 /* Reserve memory for SDR structure */ 1426 sdr = devm_kzalloc(&pdev->dev, sizeof(*sdr), GFP_KERNEL); 1427 if (!sdr) { 1428 of_node_put(np); 1429 return -ENOMEM; 1430 } 1431 ch->sdr = sdr; 1432 sdr->dev = &pdev->dev; 1433 1434 /* Establish links between SDR and channel(s) */ 1435 sdr->ch[ch->num] = ch; 1436 sdr->hw_ch_mask = BIT(ch->num); 1437 if (np) { 1438 /* Check if bonded device is ready */ 1439 ret = rcar_drif_bond_available(sdr, np); 1440 of_node_put(np); 1441 if (ret) 1442 return ret; 1443 } 1444 sdr->num_hw_ch = hweight_long(sdr->hw_ch_mask); 1445 1446 return rcar_drif_sdr_probe(sdr); 1447} 1448 1449/* DRIF channel remove */ 1450static int rcar_drif_remove(struct platform_device *pdev) 1451{ 1452 struct rcar_drif *ch = platform_get_drvdata(pdev); 1453 struct rcar_drif_sdr *sdr = ch->sdr; 1454 1455 /* Channel 0 will be the SDR instance */ 1456 if (ch->num) 1457 return 0; 1458 1459 /* SDR instance */ 1460 rcar_drif_sdr_remove(sdr); 1461 1462 return 0; 1463} 1464 1465/* FIXME: Implement suspend/resume support */ 1466static int __maybe_unused rcar_drif_suspend(struct device *dev) 1467{ 1468 return 0; 1469} 1470 1471static int __maybe_unused rcar_drif_resume(struct device *dev) 1472{ 1473 return 0; 1474} 1475 1476static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend, 1477 rcar_drif_resume); 1478 1479static const struct of_device_id rcar_drif_of_table[] = { 1480 { .compatible = "renesas,rcar-gen3-drif" }, 1481 { } 1482}; 1483MODULE_DEVICE_TABLE(of, rcar_drif_of_table); 1484 1485#define RCAR_DRIF_DRV_NAME "rcar_drif" 1486static struct platform_driver rcar_drif_driver = { 1487 .driver = { 1488 .name = RCAR_DRIF_DRV_NAME, 1489 .of_match_table = of_match_ptr(rcar_drif_of_table), 1490 .pm = &rcar_drif_pm_ops, 1491 }, 1492 .probe = rcar_drif_probe, 1493 .remove = rcar_drif_remove, 1494}; 1495 1496module_platform_driver(rcar_drif_driver); 1497 1498MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver"); 1499MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME); 1500MODULE_LICENSE("GPL"); 1501MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");