Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

media: raspberrypi: Add support for RP1-CFE

Add support for Raspberry Pi CFE. The CFE is a hardware block that
contains:

- MIPI D-PHY
- MIPI CSI-2 receiver
- Front End ISP (FE)

The driver has been upported from the Raspberry Pi kernel commit
88a681df9623 ("ARM: dts: bcm2712-rpi: Add i2c<n>_pins labels").

Co-developed-by: Naushir Patuck <naush@raspberrypi.com>
Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>

Authored by Tomi Valkeinen and committed by Mauro Carvalho Chehab
6edb685a 1358bb52

+4989
+7
MAINTAINERS
··· 19372 19372 F: drivers/media/platform/raspberrypi/pisp_be/ 19373 19373 F: include/uapi/linux/media/raspberrypi/ 19374 19374 19375 + RASPBERRY PI PISP CAMERA FRONT END 19376 + M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com> 19377 + M: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com> 19378 + S: Maintained 19379 + F: Documentation/devicetree/bindings/media/raspberrypi,rp1-cfe.yaml 19380 + F: drivers/media/platform/raspberrypi/rp1-cfe/ 19381 + 19375 19382 RC-CORE / LIRC FRAMEWORK 19376 19383 M: Sean Young <sean@mess.org> 19377 19384 L: linux-media@vger.kernel.org
+1
drivers/media/platform/raspberrypi/Kconfig
··· 3 3 comment "Raspberry Pi media platform drivers" 4 4 5 5 source "drivers/media/platform/raspberrypi/pisp_be/Kconfig" 6 + source "drivers/media/platform/raspberrypi/rp1-cfe/Kconfig"
+1
drivers/media/platform/raspberrypi/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 3 obj-y += pisp_be/ 4 + obj-y += rp1-cfe/
+15
drivers/media/platform/raspberrypi/rp1-cfe/Kconfig
··· 1 + # RP1 V4L2 camera support 2 + 3 + config VIDEO_RP1_CFE 4 + tristate "Raspberry Pi RP1 Camera Front End (CFE) video capture driver" 5 + depends on VIDEO_DEV 6 + depends on PM 7 + select VIDEO_V4L2_SUBDEV_API 8 + select MEDIA_CONTROLLER 9 + select VIDEOBUF2_DMA_CONTIG 10 + select V4L2_FWNODE 11 + help 12 + Say Y here to enable support for the Raspberry Pi RP1 Camera Front End. 13 + 14 + To compile this driver as a module, choose M here. The module will be 15 + called rp1-cfe.
+6
drivers/media/platform/raspberrypi/rp1-cfe/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Makefile for RP1 Camera Front End driver 4 + # 5 + rp1-cfe-objs := cfe.o csi2.o pisp-fe.o dphy.o 6 + obj-$(CONFIG_VIDEO_RP1_CFE) += rp1-cfe.o
+332
drivers/media/platform/raspberrypi/rp1-cfe/cfe-fmts.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * RP1 Camera Front End formats definition 4 + * 5 + * Copyright (C) 2021-2024 - Raspberry Pi Ltd. 6 + */ 7 + #ifndef _CFE_FMTS_H_ 8 + #define _CFE_FMTS_H_ 9 + 10 + #include "cfe.h" 11 + #include <media/mipi-csi2.h> 12 + 13 + static const struct cfe_fmt formats[] = { 14 + /* YUV Formats */ 15 + { 16 + .fourcc = V4L2_PIX_FMT_YUYV, 17 + .code = MEDIA_BUS_FMT_YUYV8_1X16, 18 + .depth = 16, 19 + .csi_dt = MIPI_CSI2_DT_YUV422_8B, 20 + }, 21 + { 22 + .fourcc = V4L2_PIX_FMT_UYVY, 23 + .code = MEDIA_BUS_FMT_UYVY8_1X16, 24 + .depth = 16, 25 + .csi_dt = MIPI_CSI2_DT_YUV422_8B, 26 + }, 27 + { 28 + .fourcc = V4L2_PIX_FMT_YVYU, 29 + .code = MEDIA_BUS_FMT_YVYU8_1X16, 30 + .depth = 16, 31 + .csi_dt = MIPI_CSI2_DT_YUV422_8B, 32 + }, 33 + { 34 + .fourcc = V4L2_PIX_FMT_VYUY, 35 + .code = MEDIA_BUS_FMT_VYUY8_1X16, 36 + .depth = 16, 37 + .csi_dt = MIPI_CSI2_DT_YUV422_8B, 38 + }, 39 + { 40 + /* RGB Formats */ 41 + .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */ 42 + .code = MEDIA_BUS_FMT_RGB565_2X8_LE, 43 + .depth = 16, 44 + .csi_dt = MIPI_CSI2_DT_RGB565, 45 + }, 46 + { .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */ 47 + .code = MEDIA_BUS_FMT_RGB565_2X8_BE, 48 + .depth = 16, 49 + .csi_dt = MIPI_CSI2_DT_RGB565, 50 + }, 51 + { 52 + .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */ 53 + .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, 54 + .depth = 16, 55 + .csi_dt = MIPI_CSI2_DT_RGB555, 56 + }, 57 + { 58 + .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */ 59 + .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, 60 + .depth = 16, 61 + .csi_dt = MIPI_CSI2_DT_RGB555, 62 + }, 63 + { 64 + .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */ 65 + .code = MEDIA_BUS_FMT_RGB888_1X24, 66 + .depth = 24, 67 + .csi_dt = MIPI_CSI2_DT_RGB888, 68 + }, 69 + { 70 + .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */ 71 + .code = MEDIA_BUS_FMT_BGR888_1X24, 72 + .depth = 24, 73 + .csi_dt = MIPI_CSI2_DT_RGB888, 74 + }, 75 + { 76 + .fourcc = V4L2_PIX_FMT_RGB32, /* 
argb */ 77 + .code = MEDIA_BUS_FMT_ARGB8888_1X32, 78 + .depth = 32, 79 + .csi_dt = 0x0, 80 + }, 81 + 82 + /* Bayer Formats */ 83 + { 84 + .fourcc = V4L2_PIX_FMT_SBGGR8, 85 + .code = MEDIA_BUS_FMT_SBGGR8_1X8, 86 + .depth = 8, 87 + .csi_dt = MIPI_CSI2_DT_RAW8, 88 + .remap = { V4L2_PIX_FMT_SBGGR16, V4L2_PIX_FMT_PISP_COMP1_BGGR }, 89 + }, 90 + { 91 + .fourcc = V4L2_PIX_FMT_SGBRG8, 92 + .code = MEDIA_BUS_FMT_SGBRG8_1X8, 93 + .depth = 8, 94 + .csi_dt = MIPI_CSI2_DT_RAW8, 95 + .remap = { V4L2_PIX_FMT_SGBRG16, V4L2_PIX_FMT_PISP_COMP1_GBRG }, 96 + }, 97 + { 98 + .fourcc = V4L2_PIX_FMT_SGRBG8, 99 + .code = MEDIA_BUS_FMT_SGRBG8_1X8, 100 + .depth = 8, 101 + .csi_dt = MIPI_CSI2_DT_RAW8, 102 + .remap = { V4L2_PIX_FMT_SGRBG16, V4L2_PIX_FMT_PISP_COMP1_GRBG }, 103 + }, 104 + { 105 + .fourcc = V4L2_PIX_FMT_SRGGB8, 106 + .code = MEDIA_BUS_FMT_SRGGB8_1X8, 107 + .depth = 8, 108 + .csi_dt = MIPI_CSI2_DT_RAW8, 109 + .remap = { V4L2_PIX_FMT_SRGGB16, V4L2_PIX_FMT_PISP_COMP1_RGGB }, 110 + }, 111 + { 112 + .fourcc = V4L2_PIX_FMT_SBGGR10P, 113 + .code = MEDIA_BUS_FMT_SBGGR10_1X10, 114 + .depth = 10, 115 + .csi_dt = MIPI_CSI2_DT_RAW10, 116 + .remap = { V4L2_PIX_FMT_SBGGR16, V4L2_PIX_FMT_PISP_COMP1_BGGR }, 117 + }, 118 + { 119 + .fourcc = V4L2_PIX_FMT_SGBRG10P, 120 + .code = MEDIA_BUS_FMT_SGBRG10_1X10, 121 + .depth = 10, 122 + .csi_dt = MIPI_CSI2_DT_RAW10, 123 + .remap = { V4L2_PIX_FMT_SGBRG16, V4L2_PIX_FMT_PISP_COMP1_GBRG }, 124 + }, 125 + { 126 + .fourcc = V4L2_PIX_FMT_SGRBG10P, 127 + .code = MEDIA_BUS_FMT_SGRBG10_1X10, 128 + .depth = 10, 129 + .csi_dt = MIPI_CSI2_DT_RAW10, 130 + .remap = { V4L2_PIX_FMT_SGRBG16, V4L2_PIX_FMT_PISP_COMP1_GRBG }, 131 + }, 132 + { 133 + .fourcc = V4L2_PIX_FMT_SRGGB10P, 134 + .code = MEDIA_BUS_FMT_SRGGB10_1X10, 135 + .depth = 10, 136 + .csi_dt = MIPI_CSI2_DT_RAW10, 137 + .remap = { V4L2_PIX_FMT_SRGGB16, V4L2_PIX_FMT_PISP_COMP1_RGGB }, 138 + }, 139 + { 140 + .fourcc = V4L2_PIX_FMT_SBGGR12P, 141 + .code = MEDIA_BUS_FMT_SBGGR12_1X12, 142 + .depth = 12, 143 + .csi_dt 
= MIPI_CSI2_DT_RAW12, 144 + .remap = { V4L2_PIX_FMT_SBGGR16, V4L2_PIX_FMT_PISP_COMP1_BGGR }, 145 + }, 146 + { 147 + .fourcc = V4L2_PIX_FMT_SGBRG12P, 148 + .code = MEDIA_BUS_FMT_SGBRG12_1X12, 149 + .depth = 12, 150 + .csi_dt = MIPI_CSI2_DT_RAW12, 151 + .remap = { V4L2_PIX_FMT_SGBRG16, V4L2_PIX_FMT_PISP_COMP1_GBRG }, 152 + }, 153 + { 154 + .fourcc = V4L2_PIX_FMT_SGRBG12P, 155 + .code = MEDIA_BUS_FMT_SGRBG12_1X12, 156 + .depth = 12, 157 + .csi_dt = MIPI_CSI2_DT_RAW12, 158 + .remap = { V4L2_PIX_FMT_SGRBG16, V4L2_PIX_FMT_PISP_COMP1_GRBG }, 159 + }, 160 + { 161 + .fourcc = V4L2_PIX_FMT_SRGGB12P, 162 + .code = MEDIA_BUS_FMT_SRGGB12_1X12, 163 + .depth = 12, 164 + .csi_dt = MIPI_CSI2_DT_RAW12, 165 + .remap = { V4L2_PIX_FMT_SRGGB16, V4L2_PIX_FMT_PISP_COMP1_RGGB }, 166 + }, 167 + { 168 + .fourcc = V4L2_PIX_FMT_SBGGR14P, 169 + .code = MEDIA_BUS_FMT_SBGGR14_1X14, 170 + .depth = 14, 171 + .csi_dt = MIPI_CSI2_DT_RAW14, 172 + .remap = { V4L2_PIX_FMT_SBGGR16, V4L2_PIX_FMT_PISP_COMP1_BGGR }, 173 + }, 174 + { 175 + .fourcc = V4L2_PIX_FMT_SGBRG14P, 176 + .code = MEDIA_BUS_FMT_SGBRG14_1X14, 177 + .depth = 14, 178 + .csi_dt = MIPI_CSI2_DT_RAW14, 179 + .remap = { V4L2_PIX_FMT_SGBRG16, V4L2_PIX_FMT_PISP_COMP1_GBRG }, 180 + }, 181 + { 182 + .fourcc = V4L2_PIX_FMT_SGRBG14P, 183 + .code = MEDIA_BUS_FMT_SGRBG14_1X14, 184 + .depth = 14, 185 + .csi_dt = MIPI_CSI2_DT_RAW14, 186 + .remap = { V4L2_PIX_FMT_SGRBG16, V4L2_PIX_FMT_PISP_COMP1_GRBG }, 187 + }, 188 + { 189 + .fourcc = V4L2_PIX_FMT_SRGGB14P, 190 + .code = MEDIA_BUS_FMT_SRGGB14_1X14, 191 + .depth = 14, 192 + .csi_dt = MIPI_CSI2_DT_RAW14, 193 + .remap = { V4L2_PIX_FMT_SRGGB16, V4L2_PIX_FMT_PISP_COMP1_RGGB }, 194 + }, 195 + { 196 + .fourcc = V4L2_PIX_FMT_SBGGR16, 197 + .code = MEDIA_BUS_FMT_SBGGR16_1X16, 198 + .depth = 16, 199 + .csi_dt = MIPI_CSI2_DT_RAW16, 200 + .flags = CFE_FORMAT_FLAG_FE_OUT, 201 + .remap = { V4L2_PIX_FMT_SBGGR16, V4L2_PIX_FMT_PISP_COMP1_BGGR }, 202 + }, 203 + { 204 + .fourcc = V4L2_PIX_FMT_SGBRG16, 205 + .code = 
MEDIA_BUS_FMT_SGBRG16_1X16, 206 + .depth = 16, 207 + .csi_dt = MIPI_CSI2_DT_RAW16, 208 + .flags = CFE_FORMAT_FLAG_FE_OUT, 209 + .remap = { V4L2_PIX_FMT_SGBRG16, V4L2_PIX_FMT_PISP_COMP1_GBRG }, 210 + }, 211 + { 212 + .fourcc = V4L2_PIX_FMT_SGRBG16, 213 + .code = MEDIA_BUS_FMT_SGRBG16_1X16, 214 + .depth = 16, 215 + .csi_dt = MIPI_CSI2_DT_RAW16, 216 + .flags = CFE_FORMAT_FLAG_FE_OUT, 217 + .remap = { V4L2_PIX_FMT_SGRBG16, V4L2_PIX_FMT_PISP_COMP1_GRBG }, 218 + }, 219 + { 220 + .fourcc = V4L2_PIX_FMT_SRGGB16, 221 + .code = MEDIA_BUS_FMT_SRGGB16_1X16, 222 + .depth = 16, 223 + .csi_dt = MIPI_CSI2_DT_RAW16, 224 + .flags = CFE_FORMAT_FLAG_FE_OUT, 225 + .remap = { V4L2_PIX_FMT_SRGGB16, V4L2_PIX_FMT_PISP_COMP1_RGGB }, 226 + }, 227 + /* PiSP Compressed Mode 1 */ 228 + { 229 + .fourcc = V4L2_PIX_FMT_PISP_COMP1_RGGB, 230 + .code = MEDIA_BUS_FMT_SRGGB16_1X16, 231 + .depth = 8, 232 + .flags = CFE_FORMAT_FLAG_FE_OUT, 233 + }, 234 + { 235 + .fourcc = V4L2_PIX_FMT_PISP_COMP1_BGGR, 236 + .code = MEDIA_BUS_FMT_SBGGR16_1X16, 237 + .depth = 8, 238 + .flags = CFE_FORMAT_FLAG_FE_OUT, 239 + }, 240 + { 241 + .fourcc = V4L2_PIX_FMT_PISP_COMP1_GBRG, 242 + .code = MEDIA_BUS_FMT_SGBRG16_1X16, 243 + .depth = 8, 244 + .flags = CFE_FORMAT_FLAG_FE_OUT, 245 + }, 246 + { 247 + .fourcc = V4L2_PIX_FMT_PISP_COMP1_GRBG, 248 + .code = MEDIA_BUS_FMT_SGRBG16_1X16, 249 + .depth = 8, 250 + .flags = CFE_FORMAT_FLAG_FE_OUT, 251 + }, 252 + /* Greyscale format */ 253 + { 254 + .fourcc = V4L2_PIX_FMT_GREY, 255 + .code = MEDIA_BUS_FMT_Y8_1X8, 256 + .depth = 8, 257 + .csi_dt = MIPI_CSI2_DT_RAW8, 258 + .remap = { V4L2_PIX_FMT_Y16, V4L2_PIX_FMT_PISP_COMP1_MONO }, 259 + }, 260 + { 261 + .fourcc = V4L2_PIX_FMT_Y10P, 262 + .code = MEDIA_BUS_FMT_Y10_1X10, 263 + .depth = 10, 264 + .csi_dt = MIPI_CSI2_DT_RAW10, 265 + .remap = { V4L2_PIX_FMT_Y16, V4L2_PIX_FMT_PISP_COMP1_MONO }, 266 + }, 267 + { 268 + .fourcc = V4L2_PIX_FMT_Y12P, 269 + .code = MEDIA_BUS_FMT_Y12_1X12, 270 + .depth = 12, 271 + .csi_dt = MIPI_CSI2_DT_RAW12, 272 + 
.remap = { V4L2_PIX_FMT_Y16, V4L2_PIX_FMT_PISP_COMP1_MONO }, 273 + }, 274 + { 275 + .fourcc = V4L2_PIX_FMT_Y14P, 276 + .code = MEDIA_BUS_FMT_Y14_1X14, 277 + .depth = 14, 278 + .csi_dt = MIPI_CSI2_DT_RAW14, 279 + .remap = { V4L2_PIX_FMT_Y16, V4L2_PIX_FMT_PISP_COMP1_MONO }, 280 + }, 281 + { 282 + .fourcc = V4L2_PIX_FMT_Y16, 283 + .code = MEDIA_BUS_FMT_Y16_1X16, 284 + .depth = 16, 285 + .csi_dt = MIPI_CSI2_DT_RAW16, 286 + .flags = CFE_FORMAT_FLAG_FE_OUT, 287 + .remap = { V4L2_PIX_FMT_Y16, V4L2_PIX_FMT_PISP_COMP1_MONO }, 288 + }, 289 + { 290 + .fourcc = V4L2_PIX_FMT_PISP_COMP1_MONO, 291 + .code = MEDIA_BUS_FMT_Y16_1X16, 292 + .depth = 8, 293 + .flags = CFE_FORMAT_FLAG_FE_OUT, 294 + }, 295 + 296 + /* Embedded data formats */ 297 + { 298 + .fourcc = V4L2_META_FMT_GENERIC_8, 299 + .code = MEDIA_BUS_FMT_META_8, 300 + .depth = 8, 301 + .csi_dt = MIPI_CSI2_DT_EMBEDDED_8B, 302 + .flags = CFE_FORMAT_FLAG_META_CAP, 303 + }, 304 + { 305 + .fourcc = V4L2_META_FMT_GENERIC_CSI2_10, 306 + .code = MEDIA_BUS_FMT_META_10, 307 + .depth = 10, 308 + .csi_dt = MIPI_CSI2_DT_EMBEDDED_8B, 309 + .flags = CFE_FORMAT_FLAG_META_CAP, 310 + }, 311 + { 312 + .fourcc = V4L2_META_FMT_GENERIC_CSI2_12, 313 + .code = MEDIA_BUS_FMT_META_12, 314 + .depth = 12, 315 + .csi_dt = MIPI_CSI2_DT_EMBEDDED_8B, 316 + .flags = CFE_FORMAT_FLAG_META_CAP, 317 + }, 318 + 319 + /* Frontend formats */ 320 + { 321 + .fourcc = V4L2_META_FMT_RPI_FE_CFG, 322 + .code = MEDIA_BUS_FMT_FIXED, 323 + .flags = CFE_FORMAT_FLAG_META_OUT, 324 + }, 325 + { 326 + .fourcc = V4L2_META_FMT_RPI_FE_STATS, 327 + .code = MEDIA_BUS_FMT_FIXED, 328 + .flags = CFE_FORMAT_FLAG_META_CAP, 329 + }, 330 + }; 331 + 332 + #endif /* _CFE_FMTS_H_ */
+202
drivers/media/platform/raspberrypi/rp1-cfe/cfe-trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2024 Raspberry Pi Ltd. 4 + * Copyright (c) 2024 Ideas on Board Oy 5 + */ 6 + 7 + #undef TRACE_SYSTEM 8 + #define TRACE_SYSTEM cfe 9 + 10 + #if !defined(_CFE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 11 + #define _CFE_TRACE_H 12 + 13 + #include <linux/tracepoint.h> 14 + #include <media/videobuf2-v4l2.h> 15 + 16 + TRACE_EVENT(cfe_return_buffer, 17 + TP_PROTO(u32 node_id, u32 buf_idx, u32 queue_id), 18 + TP_ARGS(node_id, buf_idx, queue_id), 19 + TP_STRUCT__entry( 20 + __field(u32, node_id) 21 + __field(u32, buf_idx) 22 + __field(u32, queue_id) 23 + ), 24 + TP_fast_assign( 25 + __entry->node_id = node_id; 26 + __entry->buf_idx = buf_idx; 27 + __entry->queue_id = queue_id; 28 + ), 29 + TP_printk("node=%u buf=%u, queue=%u", __entry->node_id, 30 + __entry->buf_idx, __entry->queue_id) 31 + ); 32 + 33 + DECLARE_EVENT_CLASS(cfe_buffer_template, 34 + TP_PROTO(u32 node_id, struct vb2_buffer *buf), 35 + TP_ARGS(node_id, buf), 36 + TP_STRUCT__entry( 37 + __field(u32, node_id) 38 + __field(u32, buf_idx) 39 + ), 40 + TP_fast_assign( 41 + __entry->node_id = node_id; 42 + __entry->buf_idx = buf->index; 43 + ), 44 + TP_printk("node=%u buf=%u", __entry->node_id, __entry->buf_idx) 45 + ); 46 + 47 + DEFINE_EVENT(cfe_buffer_template, cfe_buffer_prepare, 48 + TP_PROTO(u32 node_id, struct vb2_buffer *buf), 49 + TP_ARGS(node_id, buf)); 50 + 51 + TRACE_EVENT(cfe_buffer_queue, 52 + TP_PROTO(u32 node_id, struct vb2_buffer *buf, bool schedule_now), 53 + TP_ARGS(node_id, buf, schedule_now), 54 + TP_STRUCT__entry( 55 + __field(u32, node_id) 56 + __field(u32, buf_idx) 57 + __field(bool, schedule_now) 58 + ), 59 + TP_fast_assign( 60 + __entry->node_id = node_id; 61 + __entry->buf_idx = buf->index; 62 + __entry->schedule_now = schedule_now; 63 + ), 64 + TP_printk("node=%u buf=%u%s", __entry->node_id, __entry->buf_idx, 65 + __entry->schedule_now ? 
" schedule immediately" : "") 66 + ); 67 + 68 + DEFINE_EVENT(cfe_buffer_template, cfe_csi2_schedule, 69 + TP_PROTO(u32 node_id, struct vb2_buffer *buf), 70 + TP_ARGS(node_id, buf)); 71 + 72 + DEFINE_EVENT(cfe_buffer_template, cfe_fe_schedule, 73 + TP_PROTO(u32 node_id, struct vb2_buffer *buf), 74 + TP_ARGS(node_id, buf)); 75 + 76 + TRACE_EVENT(cfe_buffer_complete, 77 + TP_PROTO(u32 node_id, struct vb2_v4l2_buffer *buf), 78 + TP_ARGS(node_id, buf), 79 + TP_STRUCT__entry( 80 + __field(u32, node_id) 81 + __field(u32, buf_idx) 82 + __field(u32, seq) 83 + __field(u64, ts) 84 + ), 85 + TP_fast_assign( 86 + __entry->node_id = node_id; 87 + __entry->buf_idx = buf->vb2_buf.index; 88 + __entry->seq = buf->sequence; 89 + __entry->ts = buf->vb2_buf.timestamp; 90 + ), 91 + TP_printk("node=%u buf=%u seq=%u ts=%llu", __entry->node_id, 92 + __entry->buf_idx, __entry->seq, __entry->ts) 93 + ); 94 + 95 + TRACE_EVENT(cfe_frame_start, 96 + TP_PROTO(u32 node_id, u32 fs_count), 97 + TP_ARGS(node_id, fs_count), 98 + TP_STRUCT__entry( 99 + __field(u32, node_id) 100 + __field(u32, fs_count) 101 + ), 102 + TP_fast_assign( 103 + __entry->node_id = node_id; 104 + __entry->fs_count = fs_count; 105 + ), 106 + TP_printk("node=%u fs_count=%u", __entry->node_id, __entry->fs_count) 107 + ); 108 + 109 + TRACE_EVENT(cfe_frame_end, 110 + TP_PROTO(u32 node_id, u32 fs_count), 111 + TP_ARGS(node_id, fs_count), 112 + TP_STRUCT__entry( 113 + __field(u32, node_id) 114 + __field(u32, fs_count) 115 + ), 116 + TP_fast_assign( 117 + __entry->node_id = node_id; 118 + __entry->fs_count = fs_count; 119 + ), 120 + TP_printk("node=%u fs_count=%u", __entry->node_id, __entry->fs_count) 121 + ); 122 + 123 + TRACE_EVENT(cfe_prepare_next_job, 124 + TP_PROTO(bool fe_enabled), 125 + TP_ARGS(fe_enabled), 126 + TP_STRUCT__entry( 127 + __field(bool, fe_enabled) 128 + ), 129 + TP_fast_assign( 130 + __entry->fe_enabled = fe_enabled; 131 + ), 132 + TP_printk("fe_enabled=%u", __entry->fe_enabled) 133 + ); 134 + 135 + /* These are 
copied from csi2.c */ 136 + #define CSI2_STATUS_IRQ_FS(x) (BIT(0) << (x)) 137 + #define CSI2_STATUS_IRQ_FE(x) (BIT(4) << (x)) 138 + #define CSI2_STATUS_IRQ_FE_ACK(x) (BIT(8) << (x)) 139 + #define CSI2_STATUS_IRQ_LE(x) (BIT(12) << (x)) 140 + #define CSI2_STATUS_IRQ_LE_ACK(x) (BIT(16) << (x)) 141 + 142 + TRACE_EVENT(csi2_irq, 143 + TP_PROTO(u32 channel, u32 status, u32 dbg), 144 + TP_ARGS(channel, status, dbg), 145 + TP_STRUCT__entry( 146 + __field(u32, channel) 147 + __field(u32, status) 148 + __field(u32, dbg) 149 + ), 150 + TP_fast_assign( 151 + __entry->channel = channel; 152 + __entry->status = status; 153 + __entry->dbg = dbg; 154 + ), 155 + TP_printk("ch=%u flags=[ %s%s%s%s%s] frame=%u line=%u\n", 156 + __entry->channel, 157 + (__entry->status & CSI2_STATUS_IRQ_FS(__entry->channel)) ? 158 + "FS " : "", 159 + (__entry->status & CSI2_STATUS_IRQ_FE(__entry->channel)) ? 160 + "FE " : "", 161 + (__entry->status & CSI2_STATUS_IRQ_FE_ACK(__entry->channel)) ? 162 + "FE_ACK " : "", 163 + (__entry->status & CSI2_STATUS_IRQ_LE(__entry->channel)) ? 164 + "LE " : "", 165 + (__entry->status & CSI2_STATUS_IRQ_LE_ACK(__entry->channel)) ? 
166 + "LE_ACK " : "", 167 + __entry->dbg >> 16, __entry->dbg & 0xffff) 168 + ); 169 + 170 + TRACE_EVENT(fe_irq, 171 + TP_PROTO(u32 status, u32 output_status, u32 frame_status, 172 + u32 error_status, u32 int_status), 173 + TP_ARGS(status, output_status, frame_status, error_status, int_status), 174 + TP_STRUCT__entry( 175 + __field(u32, status) 176 + __field(u32, output_status) 177 + __field(u32, frame_status) 178 + __field(u32, error_status) 179 + __field(u32, int_status) 180 + ), 181 + TP_fast_assign( 182 + __entry->status = status; 183 + __entry->output_status = output_status; 184 + __entry->frame_status = frame_status; 185 + __entry->error_status = error_status; 186 + __entry->int_status = int_status; 187 + ), 188 + TP_printk("status 0x%x out_status 0x%x frame_status 0x%x error_status 0x%x int_status 0x%x", 189 + __entry->status, 190 + __entry->output_status, 191 + __entry->frame_status, 192 + __entry->error_status, 193 + __entry->int_status) 194 + ); 195 + 196 + #endif /* _CFE_TRACE_H */ 197 + 198 + /* This part must be outside protection */ 199 + #undef TRACE_INCLUDE_PATH 200 + #define TRACE_INCLUDE_PATH . 201 + #define TRACE_INCLUDE_FILE ../../drivers/media/platform/raspberrypi/rp1-cfe/cfe-trace 202 + #include <trace/define_trace.h>
+2504
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * RP1 Camera Front End Driver 4 + * 5 + * Copyright (c) 2021-2024 Raspberry Pi Ltd. 6 + * Copyright (c) 2023-2024 Ideas on Board Oy 7 + */ 8 + 9 + #include <linux/clk.h> 10 + #include <linux/debugfs.h> 11 + #include <linux/delay.h> 12 + #include <linux/device.h> 13 + #include <linux/dma-mapping.h> 14 + #include <linux/err.h> 15 + #include <linux/fwnode.h> 16 + #include <linux/init.h> 17 + #include <linux/interrupt.h> 18 + #include <linux/io.h> 19 + #include <linux/lcm.h> 20 + #include <linux/math.h> 21 + #include <linux/module.h> 22 + #include <linux/platform_device.h> 23 + #include <linux/pm_runtime.h> 24 + #include <linux/property.h> 25 + #include <linux/seq_file.h> 26 + #include <linux/slab.h> 27 + #include <linux/uaccess.h> 28 + #include <linux/videodev2.h> 29 + 30 + #include <media/v4l2-async.h> 31 + #include <media/v4l2-common.h> 32 + #include <media/v4l2-ctrls.h> 33 + #include <media/v4l2-dev.h> 34 + #include <media/v4l2-device.h> 35 + #include <media/v4l2-event.h> 36 + #include <media/v4l2-fwnode.h> 37 + #include <media/v4l2-ioctl.h> 38 + #include <media/v4l2-mc.h> 39 + #include <media/videobuf2-dma-contig.h> 40 + 41 + #include <linux/media/raspberrypi/pisp_fe_config.h> 42 + #include <linux/media/raspberrypi/pisp_fe_statistics.h> 43 + 44 + #include "cfe-fmts.h" 45 + #include "cfe.h" 46 + #include "csi2.h" 47 + #include "pisp-fe.h" 48 + 49 + #define CREATE_TRACE_POINTS 50 + #include "cfe-trace.h" 51 + 52 + #define CFE_MODULE_NAME "rp1-cfe" 53 + #define CFE_VERSION "1.0" 54 + 55 + #define cfe_dbg(cfe, fmt, arg...) dev_dbg(&(cfe)->pdev->dev, fmt, ##arg) 56 + #define cfe_info(cfe, fmt, arg...) dev_info(&(cfe)->pdev->dev, fmt, ##arg) 57 + #define cfe_err(cfe, fmt, arg...) 
dev_err(&(cfe)->pdev->dev, fmt, ##arg) 58 + 59 + /* MIPICFG registers */ 60 + #define MIPICFG_CFG 0x004 61 + #define MIPICFG_INTR 0x028 62 + #define MIPICFG_INTE 0x02c 63 + #define MIPICFG_INTF 0x030 64 + #define MIPICFG_INTS 0x034 65 + 66 + #define MIPICFG_CFG_SEL_CSI BIT(0) 67 + 68 + #define MIPICFG_INT_CSI_DMA BIT(0) 69 + #define MIPICFG_INT_CSI_HOST BIT(2) 70 + #define MIPICFG_INT_PISP_FE BIT(4) 71 + 72 + #define BPL_ALIGNMENT 16 73 + #define MAX_BYTESPERLINE 0xffffff00 74 + #define MAX_BUFFER_SIZE 0xffffff00 75 + /* 76 + * Max width is therefore determined by the max stride divided by the number of 77 + * bits per pixel. 78 + * 79 + * However, to avoid overflow issues let's use a 16k maximum. This lets us 80 + * calculate 16k * 16k * 4 with 32bits. If we need higher maximums, a careful 81 + * review and adjustment of the code is needed so that it will deal with 82 + * overflows correctly. 83 + */ 84 + #define MAX_WIDTH 16384 85 + #define MAX_HEIGHT MAX_WIDTH 86 + /* Define a nominal minimum image size */ 87 + #define MIN_WIDTH 16 88 + #define MIN_HEIGHT 16 89 + 90 + #define MIN_META_WIDTH 4 91 + #define MIN_META_HEIGHT 1 92 + 93 + const struct v4l2_mbus_framefmt cfe_default_format = { 94 + .width = 640, 95 + .height = 480, 96 + .code = MEDIA_BUS_FMT_SRGGB10_1X10, 97 + .field = V4L2_FIELD_NONE, 98 + .colorspace = V4L2_COLORSPACE_RAW, 99 + .ycbcr_enc = V4L2_YCBCR_ENC_601, 100 + .quantization = V4L2_QUANTIZATION_FULL_RANGE, 101 + .xfer_func = V4L2_XFER_FUNC_NONE, 102 + }; 103 + 104 + enum node_ids { 105 + /* CSI2 HW output nodes first. */ 106 + CSI2_CH0, 107 + CSI2_CH1, 108 + CSI2_CH2, 109 + CSI2_CH3, 110 + /* FE only nodes from here on. 
*/ 111 + FE_OUT0, 112 + FE_OUT1, 113 + FE_STATS, 114 + FE_CONFIG, 115 + NUM_NODES 116 + }; 117 + 118 + struct node_description { 119 + enum node_ids id; 120 + const char *name; 121 + unsigned int caps; 122 + unsigned int pad_flags; 123 + unsigned int link_pad; 124 + }; 125 + 126 + /* Must match the ordering of enum ids */ 127 + static const struct node_description node_desc[NUM_NODES] = { 128 + [CSI2_CH0] = { 129 + .name = "csi2-ch0", 130 + .caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE, 131 + .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT, 132 + .link_pad = CSI2_PAD_FIRST_SOURCE + 0 133 + }, 134 + /* 135 + * At the moment the main userspace component (libcamera) doesn't 136 + * support metadata with video nodes that support both video and 137 + * metadata. So for the time being this node is set to only support 138 + * V4L2_CAP_META_CAPTURE. 139 + */ 140 + [CSI2_CH1] = { 141 + .name = "csi2-ch1", 142 + .caps = V4L2_CAP_META_CAPTURE, 143 + .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT, 144 + .link_pad = CSI2_PAD_FIRST_SOURCE + 1 145 + }, 146 + [CSI2_CH2] = { 147 + .name = "csi2-ch2", 148 + .caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE, 149 + .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT, 150 + .link_pad = CSI2_PAD_FIRST_SOURCE + 2 151 + }, 152 + [CSI2_CH3] = { 153 + .name = "csi2-ch3", 154 + .caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE, 155 + .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT, 156 + .link_pad = CSI2_PAD_FIRST_SOURCE + 3 157 + }, 158 + [FE_OUT0] = { 159 + .name = "fe-image0", 160 + .caps = V4L2_CAP_VIDEO_CAPTURE, 161 + .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT, 162 + .link_pad = FE_OUTPUT0_PAD 163 + }, 164 + [FE_OUT1] = { 165 + .name = "fe-image1", 166 + .caps = V4L2_CAP_VIDEO_CAPTURE, 167 + .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT, 168 + .link_pad = FE_OUTPUT1_PAD 169 + }, 170 + [FE_STATS] = { 171 + .name = "fe-stats", 172 + .caps = 
V4L2_CAP_META_CAPTURE, 173 + .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT, 174 + .link_pad = FE_STATS_PAD 175 + }, 176 + [FE_CONFIG] = { 177 + .name = "fe-config", 178 + .caps = V4L2_CAP_META_OUTPUT, 179 + .pad_flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT, 180 + .link_pad = FE_CONFIG_PAD 181 + }, 182 + }; 183 + 184 + #define is_fe_node(node) (((node)->id) >= FE_OUT0) 185 + #define is_csi2_node(node) (!is_fe_node(node)) 186 + 187 + #define node_supports_image_output(node) \ 188 + (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_CAPTURE) 189 + #define node_supports_meta_output(node) \ 190 + (node_desc[(node)->id].caps & V4L2_CAP_META_CAPTURE) 191 + #define node_supports_image_input(node) \ 192 + (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_OUTPUT) 193 + #define node_supports_meta_input(node) \ 194 + (node_desc[(node)->id].caps & V4L2_CAP_META_OUTPUT) 195 + #define node_supports_image(node) \ 196 + (node_supports_image_output(node) || node_supports_image_input(node)) 197 + #define node_supports_meta(node) \ 198 + (node_supports_meta_output(node) || node_supports_meta_input(node)) 199 + 200 + #define is_image_output_node(node) \ 201 + ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 202 + #define is_image_input_node(node) \ 203 + ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 204 + #define is_image_node(node) \ 205 + (is_image_output_node(node) || is_image_input_node(node)) 206 + #define is_meta_output_node(node) \ 207 + ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_CAPTURE) 208 + #define is_meta_input_node(node) \ 209 + ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_OUTPUT) 210 + #define is_meta_node(node) \ 211 + (is_meta_output_node(node) || is_meta_input_node(node)) 212 + 213 + /* To track state across all nodes. 
*/ 214 + #define NODE_REGISTERED BIT(0) 215 + #define NODE_ENABLED BIT(1) 216 + #define NODE_STREAMING BIT(2) 217 + #define FS_INT BIT(3) 218 + #define FE_INT BIT(4) 219 + #define NUM_STATES 5 220 + 221 + struct cfe_buffer { 222 + struct vb2_v4l2_buffer vb; 223 + struct list_head list; 224 + }; 225 + 226 + struct cfe_config_buffer { 227 + struct cfe_buffer buf; 228 + struct pisp_fe_config config; 229 + }; 230 + 231 + static inline struct cfe_buffer *to_cfe_buffer(struct vb2_buffer *vb) 232 + { 233 + return container_of(vb, struct cfe_buffer, vb.vb2_buf); 234 + } 235 + 236 + static inline 237 + struct cfe_config_buffer *to_cfe_config_buffer(struct cfe_buffer *buf) 238 + { 239 + return container_of(buf, struct cfe_config_buffer, buf); 240 + } 241 + 242 + struct cfe_node { 243 + /* Node id */ 244 + enum node_ids id; 245 + /* Pointer pointing to current v4l2_buffer */ 246 + struct cfe_buffer *cur_frm; 247 + /* Pointer pointing to next v4l2_buffer */ 248 + struct cfe_buffer *next_frm; 249 + /* Used to store current pixel format */ 250 + struct v4l2_format vid_fmt; 251 + /* Used to store current meta format */ 252 + struct v4l2_format meta_fmt; 253 + /* Buffer queue used in video-buf */ 254 + struct vb2_queue buffer_queue; 255 + /* Queue of filled frames */ 256 + struct list_head dma_queue; 257 + /* lock used to access this structure */ 258 + struct mutex lock; 259 + /* Identifies video device for this channel */ 260 + struct video_device video_dev; 261 + /* Pointer to the parent handle */ 262 + struct cfe_device *cfe; 263 + /* Media pad for this node */ 264 + struct media_pad pad; 265 + /* Frame-start counter */ 266 + unsigned int fs_count; 267 + /* Timestamp of the current buffer */ 268 + u64 ts; 269 + }; 270 + 271 + struct cfe_device { 272 + struct dentry *debugfs; 273 + struct kref kref; 274 + 275 + /* peripheral base address */ 276 + void __iomem *mipi_cfg_base; 277 + 278 + struct clk *clk; 279 + 280 + /* V4l2 device */ 281 + struct v4l2_device v4l2_dev; 282 + 
struct media_device mdev; 283 + struct media_pipeline pipe; 284 + 285 + /* IRQ lock for node state and DMA queues */ 286 + spinlock_t state_lock; 287 + bool job_ready; 288 + bool job_queued; 289 + 290 + /* parent device */ 291 + struct platform_device *pdev; 292 + /* subdevice async Notifier */ 293 + struct v4l2_async_notifier notifier; 294 + 295 + /* Source sub device */ 296 + struct v4l2_subdev *source_sd; 297 + /* Source subdev's pad */ 298 + u32 source_pad; 299 + 300 + struct cfe_node node[NUM_NODES]; 301 + DECLARE_BITMAP(node_flags, NUM_STATES * NUM_NODES); 302 + 303 + struct csi2_device csi2; 304 + struct pisp_fe_device fe; 305 + 306 + int fe_csi2_channel; 307 + 308 + /* Mask of enabled streams */ 309 + u64 streams_mask; 310 + }; 311 + 312 + static inline bool is_fe_enabled(struct cfe_device *cfe) 313 + { 314 + return cfe->fe_csi2_channel != -1; 315 + } 316 + 317 + static inline struct cfe_device *to_cfe_device(struct v4l2_device *v4l2_dev) 318 + { 319 + return container_of(v4l2_dev, struct cfe_device, v4l2_dev); 320 + } 321 + 322 + static inline u32 cfg_reg_read(struct cfe_device *cfe, u32 offset) 323 + { 324 + return readl(cfe->mipi_cfg_base + offset); 325 + } 326 + 327 + static inline void cfg_reg_write(struct cfe_device *cfe, u32 offset, u32 val) 328 + { 329 + writel(val, cfe->mipi_cfg_base + offset); 330 + } 331 + 332 + static bool check_state(struct cfe_device *cfe, unsigned long state, 333 + unsigned int node_id) 334 + { 335 + unsigned long bit; 336 + 337 + for_each_set_bit(bit, &state, sizeof(state)) { 338 + if (!test_bit(bit + (node_id * NUM_STATES), cfe->node_flags)) 339 + return false; 340 + } 341 + 342 + return true; 343 + } 344 + 345 + static void set_state(struct cfe_device *cfe, unsigned long state, 346 + unsigned int node_id) 347 + { 348 + unsigned long bit; 349 + 350 + for_each_set_bit(bit, &state, sizeof(state)) 351 + set_bit(bit + (node_id * NUM_STATES), cfe->node_flags); 352 + } 353 + 354 + static void clear_state(struct cfe_device *cfe, 
unsigned long state, 355 + unsigned int node_id) 356 + { 357 + unsigned long bit; 358 + 359 + for_each_set_bit(bit, &state, sizeof(state)) 360 + clear_bit(bit + (node_id * NUM_STATES), cfe->node_flags); 361 + } 362 + 363 + static bool test_any_node(struct cfe_device *cfe, unsigned long cond) 364 + { 365 + for (unsigned int i = 0; i < NUM_NODES; i++) { 366 + if (check_state(cfe, cond, i)) 367 + return true; 368 + } 369 + 370 + return false; 371 + } 372 + 373 + static bool test_all_nodes(struct cfe_device *cfe, unsigned long precond, 374 + unsigned long cond) 375 + { 376 + for (unsigned int i = 0; i < NUM_NODES; i++) { 377 + if (check_state(cfe, precond, i)) { 378 + if (!check_state(cfe, cond, i)) 379 + return false; 380 + } 381 + } 382 + 383 + return true; 384 + } 385 + 386 + static int mipi_cfg_regs_show(struct seq_file *s, void *data) 387 + { 388 + struct cfe_device *cfe = s->private; 389 + int ret; 390 + 391 + ret = pm_runtime_resume_and_get(&cfe->pdev->dev); 392 + if (ret) 393 + return ret; 394 + 395 + #define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", cfg_reg_read(cfe, reg)) 396 + DUMP(MIPICFG_CFG); 397 + DUMP(MIPICFG_INTR); 398 + DUMP(MIPICFG_INTE); 399 + DUMP(MIPICFG_INTF); 400 + DUMP(MIPICFG_INTS); 401 + #undef DUMP 402 + 403 + pm_runtime_put(&cfe->pdev->dev); 404 + 405 + return 0; 406 + } 407 + 408 + DEFINE_SHOW_ATTRIBUTE(mipi_cfg_regs); 409 + 410 + /* Format setup functions */ 411 + const struct cfe_fmt *find_format_by_code(u32 code) 412 + { 413 + for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) { 414 + if (formats[i].code == code) 415 + return &formats[i]; 416 + } 417 + 418 + return NULL; 419 + } 420 + 421 + const struct cfe_fmt *find_format_by_pix(u32 pixelformat) 422 + { 423 + for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) { 424 + if (formats[i].fourcc == pixelformat) 425 + return &formats[i]; 426 + } 427 + 428 + return NULL; 429 + } 430 + 431 + static const struct cfe_fmt *find_format_by_code_and_fourcc(u32 code, 432 + u32 fourcc) 433 + 
{ 434 + for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) { 435 + if (formats[i].code == code && formats[i].fourcc == fourcc) 436 + return &formats[i]; 437 + } 438 + 439 + return NULL; 440 + } 441 + 442 + /* 443 + * Given the mbus code, find the 16 bit remapped code. Returns 0 if no remap 444 + * possible. 445 + */ 446 + u32 cfe_find_16bit_code(u32 code) 447 + { 448 + const struct cfe_fmt *cfe_fmt; 449 + 450 + cfe_fmt = find_format_by_code(code); 451 + 452 + if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_16BIT]) 453 + return 0; 454 + 455 + cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_16BIT]); 456 + if (!cfe_fmt) 457 + return 0; 458 + 459 + return cfe_fmt->code; 460 + } 461 + 462 + /* 463 + * Given the mbus code, find the 8 bit compressed code. Returns 0 if no remap 464 + * possible. 465 + */ 466 + u32 cfe_find_compressed_code(u32 code) 467 + { 468 + const struct cfe_fmt *cfe_fmt; 469 + 470 + cfe_fmt = find_format_by_code(code); 471 + 472 + if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_COMPRESSED]) 473 + return 0; 474 + 475 + cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_COMPRESSED]); 476 + if (!cfe_fmt) 477 + return 0; 478 + 479 + return cfe_fmt->code; 480 + } 481 + 482 + static void cfe_calc_vid_format_size_bpl(struct cfe_device *cfe, 483 + const struct cfe_fmt *fmt, 484 + struct v4l2_format *f) 485 + { 486 + unsigned int min_bytesperline; 487 + 488 + v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, MAX_WIDTH, 2, 489 + &f->fmt.pix.height, MIN_HEIGHT, MAX_HEIGHT, 0, 0); 490 + 491 + min_bytesperline = 492 + ALIGN((f->fmt.pix.width * fmt->depth) >> 3, BPL_ALIGNMENT); 493 + 494 + if (f->fmt.pix.bytesperline > min_bytesperline && 495 + f->fmt.pix.bytesperline <= MAX_BYTESPERLINE) 496 + f->fmt.pix.bytesperline = 497 + ALIGN(f->fmt.pix.bytesperline, BPL_ALIGNMENT); 498 + else 499 + f->fmt.pix.bytesperline = min_bytesperline; 500 + 501 + f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; 502 + 503 + cfe_dbg(cfe, "%s: %p4cc size: %ux%u 
bpl:%u img_size:%u\n", __func__, 504 + &f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height, 505 + f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); 506 + } 507 + 508 + static void cfe_calc_meta_format_size_bpl(struct cfe_device *cfe, 509 + const struct cfe_fmt *fmt, 510 + struct v4l2_format *f) 511 + { 512 + v4l_bound_align_image(&f->fmt.meta.width, MIN_META_WIDTH, MAX_WIDTH, 2, 513 + &f->fmt.meta.height, MIN_META_HEIGHT, MAX_HEIGHT, 514 + 0, 0); 515 + 516 + f->fmt.meta.bytesperline = (f->fmt.meta.width * fmt->depth) >> 3; 517 + f->fmt.meta.buffersize = f->fmt.meta.height * f->fmt.pix.bytesperline; 518 + 519 + cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u buf_size:%u\n", __func__, 520 + &f->fmt.meta.dataformat, f->fmt.meta.width, f->fmt.meta.height, 521 + f->fmt.meta.bytesperline, f->fmt.meta.buffersize); 522 + } 523 + 524 + static void cfe_schedule_next_csi2_job(struct cfe_device *cfe) 525 + { 526 + struct cfe_buffer *buf; 527 + dma_addr_t addr; 528 + 529 + for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) { 530 + struct cfe_node *node = &cfe->node[i]; 531 + unsigned int stride, size; 532 + 533 + if (!check_state(cfe, NODE_STREAMING, i)) 534 + continue; 535 + 536 + buf = list_first_entry(&node->dma_queue, struct cfe_buffer, 537 + list); 538 + node->next_frm = buf; 539 + list_del(&buf->list); 540 + 541 + trace_cfe_csi2_schedule(node->id, &buf->vb.vb2_buf); 542 + 543 + if (is_meta_node(node)) { 544 + size = node->meta_fmt.fmt.meta.buffersize; 545 + /* We use CSI2_CH_CTRL_PACK_BYTES, so stride == 0 */ 546 + stride = 0; 547 + } else { 548 + size = node->vid_fmt.fmt.pix.sizeimage; 549 + stride = node->vid_fmt.fmt.pix.bytesperline; 550 + } 551 + 552 + addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); 553 + csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size); 554 + } 555 + } 556 + 557 + static void cfe_schedule_next_pisp_job(struct cfe_device *cfe) 558 + { 559 + struct vb2_buffer *vb2_bufs[FE_NUM_PADS] = { 0 }; 560 + struct cfe_config_buffer 
*config_buf; 561 + struct cfe_buffer *buf; 562 + 563 + for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) { 564 + struct cfe_node *node = &cfe->node[i]; 565 + 566 + if (!check_state(cfe, NODE_STREAMING, i)) 567 + continue; 568 + 569 + buf = list_first_entry(&node->dma_queue, struct cfe_buffer, 570 + list); 571 + 572 + trace_cfe_fe_schedule(node->id, &buf->vb.vb2_buf); 573 + 574 + node->next_frm = buf; 575 + vb2_bufs[node_desc[i].link_pad] = &buf->vb.vb2_buf; 576 + list_del(&buf->list); 577 + } 578 + 579 + config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm); 580 + pisp_fe_submit_job(&cfe->fe, vb2_bufs, &config_buf->config); 581 + } 582 + 583 + static bool cfe_check_job_ready(struct cfe_device *cfe) 584 + { 585 + for (unsigned int i = 0; i < NUM_NODES; i++) { 586 + struct cfe_node *node = &cfe->node[i]; 587 + 588 + if (!check_state(cfe, NODE_ENABLED, i)) 589 + continue; 590 + 591 + if (list_empty(&node->dma_queue)) 592 + return false; 593 + } 594 + 595 + return true; 596 + } 597 + 598 + static void cfe_prepare_next_job(struct cfe_device *cfe) 599 + { 600 + trace_cfe_prepare_next_job(is_fe_enabled(cfe)); 601 + 602 + cfe->job_queued = true; 603 + cfe_schedule_next_csi2_job(cfe); 604 + if (is_fe_enabled(cfe)) 605 + cfe_schedule_next_pisp_job(cfe); 606 + 607 + /* Flag if another job is ready after this. 
*/ 608 + cfe->job_ready = cfe_check_job_ready(cfe); 609 + } 610 + 611 + static void cfe_process_buffer_complete(struct cfe_node *node, 612 + enum vb2_buffer_state state) 613 + { 614 + trace_cfe_buffer_complete(node->id, &node->cur_frm->vb); 615 + 616 + node->cur_frm->vb.sequence = node->fs_count - 1; 617 + vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state); 618 + } 619 + 620 + static void cfe_queue_event_sof(struct cfe_node *node) 621 + { 622 + struct v4l2_event event = { 623 + .type = V4L2_EVENT_FRAME_SYNC, 624 + .u.frame_sync.frame_sequence = node->fs_count - 1, 625 + }; 626 + 627 + v4l2_event_queue(&node->video_dev, &event); 628 + } 629 + 630 + static void cfe_sof_isr(struct cfe_node *node) 631 + { 632 + struct cfe_device *cfe = node->cfe; 633 + bool matching_fs = true; 634 + 635 + trace_cfe_frame_start(node->id, node->fs_count); 636 + 637 + /* 638 + * If the sensor is producing unexpected frame event ordering over a 639 + * sustained period of time, guard against the possibility of coming 640 + * here and orphaning the cur_frm if it's not been dequeued already. 641 + * Unfortunately, there is not enough hardware state to tell if this 642 + * may have occurred. 643 + */ 644 + if (WARN(node->cur_frm, "%s: [%s] Orphanded frame at seq %u\n", 645 + __func__, node_desc[node->id].name, node->fs_count)) 646 + cfe_process_buffer_complete(node, VB2_BUF_STATE_ERROR); 647 + 648 + node->cur_frm = node->next_frm; 649 + node->next_frm = NULL; 650 + node->fs_count++; 651 + 652 + node->ts = ktime_get_ns(); 653 + for (unsigned int i = 0; i < NUM_NODES; i++) { 654 + if (!check_state(cfe, NODE_STREAMING, i) || i == node->id) 655 + continue; 656 + /* 657 + * This checks if any other node has seen a FS. If yes, use the 658 + * same timestamp, eventually across all node buffers. 659 + */ 660 + if (cfe->node[i].fs_count >= node->fs_count) 661 + node->ts = cfe->node[i].ts; 662 + /* 663 + * This checks if all other node have seen a matching FS. 
If 664 + * yes, we can flag another job to be queued. 665 + */ 666 + if (matching_fs && cfe->node[i].fs_count != node->fs_count) 667 + matching_fs = false; 668 + } 669 + 670 + if (matching_fs) 671 + cfe->job_queued = false; 672 + 673 + if (node->cur_frm) 674 + node->cur_frm->vb.vb2_buf.timestamp = node->ts; 675 + 676 + set_state(cfe, FS_INT, node->id); 677 + clear_state(cfe, FE_INT, node->id); 678 + 679 + if (is_image_output_node(node)) 680 + cfe_queue_event_sof(node); 681 + } 682 + 683 + static void cfe_eof_isr(struct cfe_node *node) 684 + { 685 + struct cfe_device *cfe = node->cfe; 686 + 687 + trace_cfe_frame_end(node->id, node->fs_count - 1); 688 + 689 + if (node->cur_frm) 690 + cfe_process_buffer_complete(node, VB2_BUF_STATE_DONE); 691 + 692 + node->cur_frm = NULL; 693 + set_state(cfe, FE_INT, node->id); 694 + clear_state(cfe, FS_INT, node->id); 695 + } 696 + 697 + static irqreturn_t cfe_isr(int irq, void *dev) 698 + { 699 + struct cfe_device *cfe = dev; 700 + bool sof[NUM_NODES] = { 0 }, eof[NUM_NODES] = { 0 }; 701 + u32 sts; 702 + 703 + sts = cfg_reg_read(cfe, MIPICFG_INTS); 704 + 705 + if (sts & MIPICFG_INT_CSI_DMA) 706 + csi2_isr(&cfe->csi2, sof, eof); 707 + 708 + if (sts & MIPICFG_INT_PISP_FE) 709 + pisp_fe_isr(&cfe->fe, sof + CSI2_NUM_CHANNELS, 710 + eof + CSI2_NUM_CHANNELS); 711 + 712 + spin_lock(&cfe->state_lock); 713 + 714 + for (unsigned int i = 0; i < NUM_NODES; i++) { 715 + struct cfe_node *node = &cfe->node[i]; 716 + 717 + /* 718 + * The check_state(NODE_STREAMING) is to ensure we do not loop 719 + * over the CSI2_CHx nodes when the FE is active since they 720 + * generate interrupts even though the node is not streaming. 721 + */ 722 + if (!check_state(cfe, NODE_STREAMING, i) || !(sof[i] || eof[i])) 723 + continue; 724 + 725 + /* 726 + * There are 3 cases where we could get FS + FE_ACK at 727 + * the same time: 728 + * 1) FE of the current frame, and FS of the next frame. 729 + * 2) FS + FE of the same frame. 
730 + * 3) FE of the current frame, and FS + FE of the next 731 + * frame. To handle this, see the sof handler below. 732 + * 733 + * (1) is handled implicitly by the ordering of the FE and FS 734 + * handlers below. 735 + */ 736 + if (eof[i]) { 737 + /* 738 + * The condition below tests for (2). Run the FS handler 739 + * first before the FE handler, both for the current 740 + * frame. 741 + */ 742 + if (sof[i] && !check_state(cfe, FS_INT, i)) { 743 + cfe_sof_isr(node); 744 + sof[i] = false; 745 + } 746 + 747 + cfe_eof_isr(node); 748 + } 749 + 750 + if (sof[i]) { 751 + /* 752 + * The condition below tests for (3). In such cases, we 753 + * come in here with FS flag set in the node state from 754 + * the previous frame since it only gets cleared in 755 + * cfe_eof_isr(). Handle the FE for the previous 756 + * frame first before the FS handler for the current 757 + * frame. 758 + */ 759 + if (check_state(cfe, FS_INT, node->id) && 760 + !check_state(cfe, FE_INT, node->id)) { 761 + cfe_dbg(cfe, "%s: [%s] Handling missing previous FE interrupt\n", 762 + __func__, node_desc[node->id].name); 763 + cfe_eof_isr(node); 764 + } 765 + 766 + cfe_sof_isr(node); 767 + } 768 + 769 + if (!cfe->job_queued && cfe->job_ready) 770 + cfe_prepare_next_job(cfe); 771 + } 772 + 773 + spin_unlock(&cfe->state_lock); 774 + 775 + return IRQ_HANDLED; 776 + } 777 + 778 + /* 779 + * Stream helpers 780 + */ 781 + 782 + static int cfe_get_vc_dt_fallback(struct cfe_device *cfe, u8 *vc, u8 *dt) 783 + { 784 + struct v4l2_subdev_state *state; 785 + struct v4l2_mbus_framefmt *fmt; 786 + const struct cfe_fmt *cfe_fmt; 787 + 788 + state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); 789 + 790 + fmt = v4l2_subdev_state_get_format(state, CSI2_PAD_SINK, 0); 791 + if (!fmt) 792 + return -EINVAL; 793 + 794 + cfe_fmt = find_format_by_code(fmt->code); 795 + if (!cfe_fmt) 796 + return -EINVAL; 797 + 798 + *vc = 0; 799 + *dt = cfe_fmt->csi_dt; 800 + 801 + return 0; 802 + } 803 + 804 + static int 
cfe_get_vc_dt(struct cfe_device *cfe, unsigned int channel, u8 *vc, 805 + u8 *dt) 806 + { 807 + struct v4l2_mbus_frame_desc remote_desc; 808 + struct v4l2_subdev_state *state; 809 + u32 sink_stream; 810 + unsigned int i; 811 + int ret; 812 + 813 + state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); 814 + 815 + ret = v4l2_subdev_routing_find_opposite_end(&state->routing, 816 + CSI2_PAD_FIRST_SOURCE + channel, 0, NULL, &sink_stream); 817 + if (ret) 818 + return ret; 819 + 820 + ret = v4l2_subdev_call(cfe->source_sd, pad, get_frame_desc, 821 + cfe->source_pad, &remote_desc); 822 + if (ret == -ENOIOCTLCMD) { 823 + cfe_dbg(cfe, "source does not support get_frame_desc, use fallback\n"); 824 + return cfe_get_vc_dt_fallback(cfe, vc, dt); 825 + } else if (ret) { 826 + cfe_err(cfe, "Failed to get frame descriptor\n"); 827 + return ret; 828 + } 829 + 830 + if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { 831 + cfe_err(cfe, "Frame descriptor does not describe CSI-2 link"); 832 + return -EINVAL; 833 + } 834 + 835 + for (i = 0; i < remote_desc.num_entries; i++) { 836 + if (remote_desc.entry[i].stream == sink_stream) 837 + break; 838 + } 839 + 840 + if (i == remote_desc.num_entries) { 841 + cfe_err(cfe, "Stream %u not found in remote frame desc\n", 842 + sink_stream); 843 + return -EINVAL; 844 + } 845 + 846 + *vc = remote_desc.entry[i].bus.csi2.vc; 847 + *dt = remote_desc.entry[i].bus.csi2.dt; 848 + 849 + return 0; 850 + } 851 + 852 + static int cfe_start_channel(struct cfe_node *node) 853 + { 854 + struct cfe_device *cfe = node->cfe; 855 + struct v4l2_subdev_state *state; 856 + struct v4l2_mbus_framefmt *source_fmt; 857 + const struct cfe_fmt *fmt; 858 + unsigned long flags; 859 + bool start_fe; 860 + int ret; 861 + 862 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 863 + 864 + start_fe = is_fe_enabled(cfe) && 865 + test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING); 866 + 867 + state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); 868 + 
869 + if (start_fe) { 870 + unsigned int width, height; 871 + u8 vc, dt; 872 + 873 + cfe_dbg(cfe, "%s: %s using csi2 channel %d\n", __func__, 874 + node_desc[FE_OUT0].name, cfe->fe_csi2_channel); 875 + 876 + ret = cfe_get_vc_dt(cfe, cfe->fe_csi2_channel, &vc, &dt); 877 + if (ret) 878 + return ret; 879 + 880 + source_fmt = v4l2_subdev_state_get_format(state, 881 + node_desc[cfe->fe_csi2_channel].link_pad); 882 + fmt = find_format_by_code(source_fmt->code); 883 + 884 + width = source_fmt->width; 885 + height = source_fmt->height; 886 + 887 + /* Must have a valid CSI2 datatype. */ 888 + WARN_ON(!fmt->csi_dt); 889 + 890 + /* 891 + * Start the associated CSI2 Channel as well. 892 + * 893 + * Must write to the ADDR register to latch the ctrl values 894 + * even if we are connected to the front end. Once running, 895 + * this is handled by the CSI2 AUTO_ARM mode. 896 + */ 897 + csi2_start_channel(&cfe->csi2, cfe->fe_csi2_channel, 898 + CSI2_MODE_FE_STREAMING, 899 + true, false, width, height, vc, dt); 900 + csi2_set_buffer(&cfe->csi2, cfe->fe_csi2_channel, 0, 0, -1); 901 + pisp_fe_start(&cfe->fe); 902 + } 903 + 904 + if (is_csi2_node(node)) { 905 + unsigned int width = 0, height = 0; 906 + u8 vc, dt; 907 + 908 + ret = cfe_get_vc_dt(cfe, node->id, &vc, &dt); 909 + if (ret) { 910 + if (start_fe) { 911 + csi2_stop_channel(&cfe->csi2, 912 + cfe->fe_csi2_channel); 913 + pisp_fe_stop(&cfe->fe); 914 + } 915 + 916 + return ret; 917 + } 918 + 919 + u32 mode = CSI2_MODE_NORMAL; 920 + 921 + source_fmt = v4l2_subdev_state_get_format(state, 922 + node_desc[node->id].link_pad); 923 + fmt = find_format_by_code(source_fmt->code); 924 + 925 + /* Must have a valid CSI2 datatype. 
*/ 926 + WARN_ON(!fmt->csi_dt); 927 + 928 + if (is_image_output_node(node)) { 929 + u32 pixfmt; 930 + 931 + width = source_fmt->width; 932 + height = source_fmt->height; 933 + 934 + pixfmt = node->vid_fmt.fmt.pix.pixelformat; 935 + 936 + if (pixfmt == fmt->remap[CFE_REMAP_16BIT]) { 937 + mode = CSI2_MODE_REMAP; 938 + } else if (pixfmt == fmt->remap[CFE_REMAP_COMPRESSED]) { 939 + mode = CSI2_MODE_COMPRESSED; 940 + csi2_set_compression(&cfe->csi2, node->id, 941 + CSI2_COMPRESSION_DELTA, 0, 942 + 0); 943 + } 944 + } 945 + /* Unconditionally start this CSI2 channel. */ 946 + csi2_start_channel(&cfe->csi2, node->id, 947 + mode, 948 + /* Auto arm */ 949 + false, 950 + /* Pack bytes */ 951 + is_meta_node(node) ? true : false, 952 + width, height, vc, dt); 953 + } 954 + 955 + spin_lock_irqsave(&cfe->state_lock, flags); 956 + if (cfe->job_ready && test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) 957 + cfe_prepare_next_job(cfe); 958 + spin_unlock_irqrestore(&cfe->state_lock, flags); 959 + 960 + return 0; 961 + } 962 + 963 + static void cfe_stop_channel(struct cfe_node *node, bool fe_stop) 964 + { 965 + struct cfe_device *cfe = node->cfe; 966 + 967 + cfe_dbg(cfe, "%s: [%s] fe_stop %u\n", __func__, 968 + node_desc[node->id].name, fe_stop); 969 + 970 + if (fe_stop) { 971 + csi2_stop_channel(&cfe->csi2, cfe->fe_csi2_channel); 972 + pisp_fe_stop(&cfe->fe); 973 + } 974 + 975 + if (is_csi2_node(node)) 976 + csi2_stop_channel(&cfe->csi2, node->id); 977 + } 978 + 979 + static void cfe_return_buffers(struct cfe_node *node, 980 + enum vb2_buffer_state state) 981 + { 982 + struct cfe_device *cfe = node->cfe; 983 + struct cfe_buffer *buf, *tmp; 984 + unsigned long flags; 985 + 986 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 987 + 988 + spin_lock_irqsave(&cfe->state_lock, flags); 989 + list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) { 990 + list_del(&buf->list); 991 + trace_cfe_return_buffer(node->id, buf->vb.vb2_buf.index, 2); 992 + 
vb2_buffer_done(&buf->vb.vb2_buf, state); 993 + } 994 + 995 + if (node->cur_frm) { 996 + trace_cfe_return_buffer(node->id, 997 + node->cur_frm->vb.vb2_buf.index, 0); 998 + vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state); 999 + } 1000 + if (node->next_frm && node->cur_frm != node->next_frm) { 1001 + trace_cfe_return_buffer(node->id, 1002 + node->next_frm->vb.vb2_buf.index, 1); 1003 + vb2_buffer_done(&node->next_frm->vb.vb2_buf, state); 1004 + } 1005 + 1006 + node->cur_frm = NULL; 1007 + node->next_frm = NULL; 1008 + spin_unlock_irqrestore(&cfe->state_lock, flags); 1009 + } 1010 + 1011 + /* 1012 + * vb2 ops 1013 + */ 1014 + 1015 + static int cfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, 1016 + unsigned int *nplanes, unsigned int sizes[], 1017 + struct device *alloc_devs[]) 1018 + { 1019 + struct cfe_node *node = vb2_get_drv_priv(vq); 1020 + struct cfe_device *cfe = node->cfe; 1021 + unsigned int size = is_image_node(node) ? 1022 + node->vid_fmt.fmt.pix.sizeimage : 1023 + node->meta_fmt.fmt.meta.buffersize; 1024 + 1025 + cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name, 1026 + node->buffer_queue.type); 1027 + 1028 + if (vq->max_num_buffers + *nbuffers < 3) 1029 + *nbuffers = 3 - vq->max_num_buffers; 1030 + 1031 + if (*nplanes) { 1032 + if (sizes[0] < size) { 1033 + cfe_err(cfe, "sizes[0] %i < size %u\n", sizes[0], size); 1034 + return -EINVAL; 1035 + } 1036 + size = sizes[0]; 1037 + } 1038 + 1039 + *nplanes = 1; 1040 + sizes[0] = size; 1041 + 1042 + return 0; 1043 + } 1044 + 1045 + static int cfe_buffer_prepare(struct vb2_buffer *vb) 1046 + { 1047 + struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue); 1048 + struct cfe_device *cfe = node->cfe; 1049 + struct cfe_buffer *buf = to_cfe_buffer(vb); 1050 + unsigned long size; 1051 + 1052 + trace_cfe_buffer_prepare(node->id, vb); 1053 + 1054 + size = is_image_node(node) ? 
node->vid_fmt.fmt.pix.sizeimage : 1055 + node->meta_fmt.fmt.meta.buffersize; 1056 + if (vb2_plane_size(vb, 0) < size) { 1057 + cfe_err(cfe, "data will not fit into plane (%lu < %lu)\n", 1058 + vb2_plane_size(vb, 0), size); 1059 + return -EINVAL; 1060 + } 1061 + 1062 + vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); 1063 + 1064 + if (node->id == FE_CONFIG) { 1065 + struct cfe_config_buffer *b = to_cfe_config_buffer(buf); 1066 + void *addr = vb2_plane_vaddr(vb, 0); 1067 + 1068 + memcpy(&b->config, addr, sizeof(struct pisp_fe_config)); 1069 + return pisp_fe_validate_config(&cfe->fe, &b->config, 1070 + &cfe->node[FE_OUT0].vid_fmt, 1071 + &cfe->node[FE_OUT1].vid_fmt); 1072 + } 1073 + 1074 + return 0; 1075 + } 1076 + 1077 + static void cfe_buffer_queue(struct vb2_buffer *vb) 1078 + { 1079 + struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue); 1080 + struct cfe_device *cfe = node->cfe; 1081 + struct cfe_buffer *buf = to_cfe_buffer(vb); 1082 + unsigned long flags; 1083 + bool schedule_now; 1084 + 1085 + spin_lock_irqsave(&cfe->state_lock, flags); 1086 + 1087 + list_add_tail(&buf->list, &node->dma_queue); 1088 + 1089 + if (!cfe->job_ready) 1090 + cfe->job_ready = cfe_check_job_ready(cfe); 1091 + 1092 + schedule_now = !cfe->job_queued && cfe->job_ready && 1093 + test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING); 1094 + 1095 + trace_cfe_buffer_queue(node->id, vb, schedule_now); 1096 + 1097 + if (schedule_now) 1098 + cfe_prepare_next_job(cfe); 1099 + 1100 + spin_unlock_irqrestore(&cfe->state_lock, flags); 1101 + } 1102 + 1103 + static s64 cfe_get_source_link_freq(struct cfe_device *cfe) 1104 + { 1105 + struct v4l2_subdev_state *state; 1106 + s64 link_freq; 1107 + u32 bpp; 1108 + 1109 + state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd); 1110 + 1111 + /* 1112 + * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back 1113 + * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available. 
1114 + * 1115 + * With multistream input there is no single pixel rate, and thus we 1116 + * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp which 1117 + * causes v4l2_get_link_freq() to return an error if it falls back to 1118 + * V4L2_CID_PIXEL_RATE. 1119 + */ 1120 + 1121 + if (state->routing.num_routes == 1) { 1122 + struct v4l2_subdev_route *route = &state->routing.routes[0]; 1123 + struct v4l2_mbus_framefmt *source_fmt; 1124 + const struct cfe_fmt *fmt; 1125 + 1126 + source_fmt = v4l2_subdev_state_get_format(state, 1127 + route->sink_pad, 1128 + route->sink_stream); 1129 + 1130 + fmt = find_format_by_code(source_fmt->code); 1131 + if (!fmt) 1132 + return -EINVAL; 1133 + 1134 + bpp = fmt->depth; 1135 + } else { 1136 + bpp = 0; 1137 + } 1138 + 1139 + link_freq = v4l2_get_link_freq(cfe->source_sd->ctrl_handler, bpp, 1140 + 2 * cfe->csi2.dphy.active_lanes); 1141 + if (link_freq < 0) 1142 + cfe_err(cfe, "failed to get link freq for subdev '%s'\n", 1143 + cfe->source_sd->name); 1144 + 1145 + return link_freq; 1146 + } 1147 + 1148 + static int cfe_start_streaming(struct vb2_queue *vq, unsigned int count) 1149 + { 1150 + struct v4l2_mbus_config mbus_config = { 0 }; 1151 + struct cfe_node *node = vb2_get_drv_priv(vq); 1152 + struct cfe_device *cfe = node->cfe; 1153 + struct v4l2_subdev_state *state; 1154 + struct v4l2_subdev_route *route; 1155 + s64 link_freq; 1156 + int ret; 1157 + 1158 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1159 + 1160 + if (!check_state(cfe, NODE_ENABLED, node->id)) { 1161 + cfe_err(cfe, "%s node link is not enabled.\n", 1162 + node_desc[node->id].name); 1163 + ret = -EINVAL; 1164 + goto err_streaming; 1165 + } 1166 + 1167 + ret = pm_runtime_resume_and_get(&cfe->pdev->dev); 1168 + if (ret < 0) { 1169 + cfe_err(cfe, "pm_runtime_resume_and_get failed\n"); 1170 + goto err_streaming; 1171 + } 1172 + 1173 + /* When using the Frontend, we must enable the FE_CONFIG node. 
*/ 1174 + if (is_fe_enabled(cfe) && 1175 + !check_state(cfe, NODE_ENABLED, cfe->node[FE_CONFIG].id)) { 1176 + cfe_err(cfe, "FE enabled, but FE_CONFIG node is not\n"); 1177 + ret = -EINVAL; 1178 + goto err_pm_put; 1179 + } 1180 + 1181 + ret = media_pipeline_start(&node->pad, &cfe->pipe); 1182 + if (ret < 0) { 1183 + cfe_err(cfe, "Failed to start media pipeline: %d\n", ret); 1184 + goto err_pm_put; 1185 + } 1186 + 1187 + state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd); 1188 + 1189 + clear_state(cfe, FS_INT | FE_INT, node->id); 1190 + set_state(cfe, NODE_STREAMING, node->id); 1191 + node->fs_count = 0; 1192 + 1193 + ret = cfe_start_channel(node); 1194 + if (ret) 1195 + goto err_unlock_state; 1196 + 1197 + if (!test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) { 1198 + cfe_dbg(cfe, "Streaming on hold, as all nodes are not set to streaming yet\n"); 1199 + v4l2_subdev_unlock_state(state); 1200 + return 0; 1201 + } 1202 + 1203 + cfg_reg_write(cfe, MIPICFG_CFG, MIPICFG_CFG_SEL_CSI); 1204 + cfg_reg_write(cfe, MIPICFG_INTE, 1205 + MIPICFG_INT_CSI_DMA | MIPICFG_INT_PISP_FE); 1206 + 1207 + ret = v4l2_subdev_call(cfe->source_sd, pad, get_mbus_config, 0, 1208 + &mbus_config); 1209 + if (ret < 0 && ret != -ENOIOCTLCMD) { 1210 + cfe_err(cfe, "g_mbus_config failed\n"); 1211 + goto err_clear_inte; 1212 + } 1213 + 1214 + cfe->csi2.dphy.active_lanes = mbus_config.bus.mipi_csi2.num_data_lanes; 1215 + if (!cfe->csi2.dphy.active_lanes) 1216 + cfe->csi2.dphy.active_lanes = cfe->csi2.dphy.max_lanes; 1217 + if (cfe->csi2.dphy.active_lanes > cfe->csi2.dphy.max_lanes) { 1218 + cfe_err(cfe, "Device has requested %u data lanes, which is >%u configured in DT\n", 1219 + cfe->csi2.dphy.active_lanes, cfe->csi2.dphy.max_lanes); 1220 + ret = -EINVAL; 1221 + goto err_clear_inte; 1222 + } 1223 + 1224 + link_freq = cfe_get_source_link_freq(cfe); 1225 + if (link_freq < 0) 1226 + goto err_clear_inte; 1227 + 1228 + cfe->csi2.dphy.dphy_rate = div_s64(link_freq * 2, 1000000); 1229 + 
csi2_open_rx(&cfe->csi2); 1230 + 1231 + cfe->streams_mask = 0; 1232 + 1233 + for_each_active_route(&state->routing, route) 1234 + cfe->streams_mask |= BIT_ULL(route->sink_stream); 1235 + 1236 + ret = v4l2_subdev_enable_streams(cfe->source_sd, cfe->source_pad, 1237 + cfe->streams_mask); 1238 + if (ret) { 1239 + cfe_err(cfe, "stream on failed in subdev\n"); 1240 + goto err_disable_cfe; 1241 + } 1242 + 1243 + cfe_dbg(cfe, "Streaming enabled\n"); 1244 + 1245 + v4l2_subdev_unlock_state(state); 1246 + 1247 + return 0; 1248 + 1249 + err_disable_cfe: 1250 + csi2_close_rx(&cfe->csi2); 1251 + err_clear_inte: 1252 + cfg_reg_write(cfe, MIPICFG_INTE, 0); 1253 + 1254 + cfe_stop_channel(node, 1255 + is_fe_enabled(cfe) && test_all_nodes(cfe, NODE_ENABLED, 1256 + NODE_STREAMING)); 1257 + err_unlock_state: 1258 + v4l2_subdev_unlock_state(state); 1259 + media_pipeline_stop(&node->pad); 1260 + err_pm_put: 1261 + pm_runtime_put(&cfe->pdev->dev); 1262 + err_streaming: 1263 + cfe_return_buffers(node, VB2_BUF_STATE_QUEUED); 1264 + clear_state(cfe, NODE_STREAMING, node->id); 1265 + 1266 + return ret; 1267 + } 1268 + 1269 + static void cfe_stop_streaming(struct vb2_queue *vq) 1270 + { 1271 + struct cfe_node *node = vb2_get_drv_priv(vq); 1272 + struct cfe_device *cfe = node->cfe; 1273 + unsigned long flags; 1274 + bool fe_stop; 1275 + 1276 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1277 + 1278 + spin_lock_irqsave(&cfe->state_lock, flags); 1279 + fe_stop = is_fe_enabled(cfe) && 1280 + test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING); 1281 + 1282 + cfe->job_ready = false; 1283 + clear_state(cfe, NODE_STREAMING, node->id); 1284 + spin_unlock_irqrestore(&cfe->state_lock, flags); 1285 + 1286 + cfe_stop_channel(node, fe_stop); 1287 + 1288 + if (!test_any_node(cfe, NODE_STREAMING)) { 1289 + struct v4l2_subdev_state *state; 1290 + int ret; 1291 + 1292 + state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd); 1293 + 1294 + ret = 
v4l2_subdev_disable_streams(cfe->source_sd, 1295 + cfe->source_pad, 1296 + cfe->streams_mask); 1297 + if (ret) 1298 + cfe_err(cfe, "stream disable failed in subdev\n"); 1299 + 1300 + v4l2_subdev_unlock_state(state); 1301 + 1302 + csi2_close_rx(&cfe->csi2); 1303 + 1304 + cfg_reg_write(cfe, MIPICFG_INTE, 0); 1305 + 1306 + cfe_dbg(cfe, "%s: Streaming disabled\n", __func__); 1307 + } 1308 + 1309 + media_pipeline_stop(&node->pad); 1310 + 1311 + /* Clear all queued buffers for the node */ 1312 + cfe_return_buffers(node, VB2_BUF_STATE_ERROR); 1313 + 1314 + pm_runtime_put(&cfe->pdev->dev); 1315 + } 1316 + 1317 + static const struct vb2_ops cfe_video_qops = { 1318 + .wait_prepare = vb2_ops_wait_prepare, 1319 + .wait_finish = vb2_ops_wait_finish, 1320 + .queue_setup = cfe_queue_setup, 1321 + .buf_prepare = cfe_buffer_prepare, 1322 + .buf_queue = cfe_buffer_queue, 1323 + .start_streaming = cfe_start_streaming, 1324 + .stop_streaming = cfe_stop_streaming, 1325 + }; 1326 + 1327 + /* 1328 + * v4l2 ioctl ops 1329 + */ 1330 + 1331 + static int cfe_querycap(struct file *file, void *priv, 1332 + struct v4l2_capability *cap) 1333 + { 1334 + strscpy(cap->driver, CFE_MODULE_NAME, sizeof(cap->driver)); 1335 + strscpy(cap->card, CFE_MODULE_NAME, sizeof(cap->card)); 1336 + 1337 + cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE | 1338 + V4L2_CAP_META_OUTPUT; 1339 + 1340 + return 0; 1341 + } 1342 + 1343 + static int cfe_enum_fmt_vid_cap(struct file *file, void *priv, 1344 + struct v4l2_fmtdesc *f) 1345 + { 1346 + struct cfe_node *node = video_drvdata(file); 1347 + struct cfe_device *cfe = node->cfe; 1348 + unsigned int i, j; 1349 + 1350 + if (!node_supports_image_output(node)) 1351 + return -EINVAL; 1352 + 1353 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1354 + 1355 + for (i = 0, j = 0; i < ARRAY_SIZE(formats); i++) { 1356 + if (f->mbus_code && formats[i].code != f->mbus_code) 1357 + continue; 1358 + 1359 + if (formats[i].flags & 
CFE_FORMAT_FLAG_META_OUT || 1360 + formats[i].flags & CFE_FORMAT_FLAG_META_CAP) 1361 + continue; 1362 + 1363 + if (is_fe_node(node) && 1364 + !(formats[i].flags & CFE_FORMAT_FLAG_FE_OUT)) 1365 + continue; 1366 + 1367 + if (j == f->index) { 1368 + f->pixelformat = formats[i].fourcc; 1369 + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1370 + return 0; 1371 + } 1372 + j++; 1373 + } 1374 + 1375 + return -EINVAL; 1376 + } 1377 + 1378 + static int cfe_g_fmt(struct file *file, void *priv, struct v4l2_format *f) 1379 + { 1380 + struct cfe_node *node = video_drvdata(file); 1381 + 1382 + if (!node_supports_image(node)) 1383 + return -EINVAL; 1384 + 1385 + *f = node->vid_fmt; 1386 + 1387 + return 0; 1388 + } 1389 + 1390 + static int cfe_validate_fmt_vid_cap(struct cfe_node *node, 1391 + struct v4l2_format *f) 1392 + { 1393 + struct cfe_device *cfe = node->cfe; 1394 + const struct cfe_fmt *fmt; 1395 + 1396 + cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 pix %p4cc\n", __func__, 1397 + node_desc[node->id].name, f->fmt.pix.width, f->fmt.pix.height, 1398 + &f->fmt.pix.pixelformat); 1399 + 1400 + if (!node_supports_image_output(node)) 1401 + return -EINVAL; 1402 + 1403 + /* 1404 + * Default to a format that works for both CSI2 and FE. 
1405 + */ 1406 + fmt = find_format_by_pix(f->fmt.pix.pixelformat); 1407 + if (!fmt) 1408 + fmt = find_format_by_code(MEDIA_BUS_FMT_SBGGR10_1X10); 1409 + 1410 + f->fmt.pix.pixelformat = fmt->fourcc; 1411 + 1412 + if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) { 1413 + f->fmt.pix.pixelformat = fmt->remap[CFE_REMAP_16BIT]; 1414 + fmt = find_format_by_pix(f->fmt.pix.pixelformat); 1415 + } 1416 + 1417 + f->fmt.pix.field = V4L2_FIELD_NONE; 1418 + 1419 + cfe_calc_vid_format_size_bpl(cfe, fmt, f); 1420 + 1421 + return 0; 1422 + } 1423 + 1424 + static int cfe_s_fmt_vid_cap(struct file *file, void *priv, 1425 + struct v4l2_format *f) 1426 + { 1427 + struct cfe_node *node = video_drvdata(file); 1428 + struct cfe_device *cfe = node->cfe; 1429 + struct vb2_queue *q = &node->buffer_queue; 1430 + int ret; 1431 + 1432 + if (vb2_is_busy(q)) 1433 + return -EBUSY; 1434 + 1435 + ret = cfe_validate_fmt_vid_cap(node, f); 1436 + if (ret) 1437 + return ret; 1438 + 1439 + node->vid_fmt = *f; 1440 + 1441 + cfe_dbg(cfe, "%s: Set %ux%u, V4L2 pix %p4cc\n", __func__, 1442 + node->vid_fmt.fmt.pix.width, node->vid_fmt.fmt.pix.height, 1443 + &node->vid_fmt.fmt.pix.pixelformat); 1444 + 1445 + return 0; 1446 + } 1447 + 1448 + static int cfe_try_fmt_vid_cap(struct file *file, void *priv, 1449 + struct v4l2_format *f) 1450 + { 1451 + struct cfe_node *node = video_drvdata(file); 1452 + struct cfe_device *cfe = node->cfe; 1453 + 1454 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1455 + 1456 + return cfe_validate_fmt_vid_cap(node, f); 1457 + } 1458 + 1459 + static int cfe_enum_fmt_meta(struct file *file, void *priv, 1460 + struct v4l2_fmtdesc *f) 1461 + { 1462 + struct cfe_node *node = video_drvdata(file); 1463 + struct cfe_device *cfe = node->cfe; 1464 + 1465 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1466 + 1467 + if (!node_supports_meta(node)) 1468 + return -EINVAL; 1469 + 1470 + switch (node->id) { 1471 + case CSI2_CH0...CSI2_CH3: 1472 + f->flags = 
V4L2_FMT_FLAG_META_LINE_BASED; 1473 + 1474 + switch (f->index) { 1475 + case 0: 1476 + f->pixelformat = V4L2_META_FMT_GENERIC_8; 1477 + return 0; 1478 + case 1: 1479 + f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_10; 1480 + return 0; 1481 + case 2: 1482 + f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_12; 1483 + return 0; 1484 + default: 1485 + return -EINVAL; 1486 + } 1487 + default: 1488 + break; 1489 + } 1490 + 1491 + if (f->index != 0) 1492 + return -EINVAL; 1493 + 1494 + switch (node->id) { 1495 + case FE_STATS: 1496 + f->pixelformat = V4L2_META_FMT_RPI_FE_STATS; 1497 + return 0; 1498 + case FE_CONFIG: 1499 + f->pixelformat = V4L2_META_FMT_RPI_FE_CFG; 1500 + return 0; 1501 + default: 1502 + return -EINVAL; 1503 + } 1504 + } 1505 + 1506 + static int cfe_validate_fmt_meta(struct cfe_node *node, struct v4l2_format *f) 1507 + { 1508 + struct cfe_device *cfe = node->cfe; 1509 + const struct cfe_fmt *fmt; 1510 + 1511 + switch (node->id) { 1512 + case CSI2_CH0...CSI2_CH3: 1513 + cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 meta %p4cc\n", __func__, 1514 + node_desc[node->id].name, f->fmt.meta.width, 1515 + f->fmt.meta.height, &f->fmt.meta.dataformat); 1516 + break; 1517 + case FE_STATS: 1518 + case FE_CONFIG: 1519 + cfe_dbg(cfe, "%s: [%s] %u bytes, V4L2 meta %p4cc\n", __func__, 1520 + node_desc[node->id].name, f->fmt.meta.buffersize, 1521 + &f->fmt.meta.dataformat); 1522 + break; 1523 + default: 1524 + return -EINVAL; 1525 + } 1526 + 1527 + if (!node_supports_meta(node)) 1528 + return -EINVAL; 1529 + 1530 + switch (node->id) { 1531 + case CSI2_CH0...CSI2_CH3: 1532 + fmt = find_format_by_pix(f->fmt.meta.dataformat); 1533 + if (!fmt || !(fmt->flags & CFE_FORMAT_FLAG_META_CAP)) 1534 + fmt = find_format_by_pix(V4L2_META_FMT_GENERIC_CSI2_10); 1535 + 1536 + f->fmt.meta.dataformat = fmt->fourcc; 1537 + 1538 + cfe_calc_meta_format_size_bpl(cfe, fmt, f); 1539 + 1540 + return 0; 1541 + case FE_STATS: 1542 + f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_STATS; 1543 + f->fmt.meta.buffersize = 
sizeof(struct pisp_statistics); 1544 + return 0; 1545 + case FE_CONFIG: 1546 + f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_CFG; 1547 + f->fmt.meta.buffersize = sizeof(struct pisp_fe_config); 1548 + return 0; 1549 + default: 1550 + return -EINVAL; 1551 + } 1552 + } 1553 + 1554 + static int cfe_g_fmt_meta(struct file *file, void *priv, struct v4l2_format *f) 1555 + { 1556 + struct cfe_node *node = video_drvdata(file); 1557 + struct cfe_device *cfe = node->cfe; 1558 + 1559 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1560 + 1561 + if (!node_supports_meta(node)) 1562 + return -EINVAL; 1563 + 1564 + *f = node->meta_fmt; 1565 + 1566 + return 0; 1567 + } 1568 + 1569 + static int cfe_s_fmt_meta(struct file *file, void *priv, struct v4l2_format *f) 1570 + { 1571 + struct cfe_node *node = video_drvdata(file); 1572 + struct cfe_device *cfe = node->cfe; 1573 + struct vb2_queue *q = &node->buffer_queue; 1574 + int ret; 1575 + 1576 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1577 + 1578 + if (vb2_is_busy(q)) 1579 + return -EBUSY; 1580 + 1581 + if (!node_supports_meta(node)) 1582 + return -EINVAL; 1583 + 1584 + ret = cfe_validate_fmt_meta(node, f); 1585 + if (ret) 1586 + return ret; 1587 + 1588 + node->meta_fmt = *f; 1589 + 1590 + cfe_dbg(cfe, "%s: Set %p4cc\n", __func__, 1591 + &node->meta_fmt.fmt.meta.dataformat); 1592 + 1593 + return 0; 1594 + } 1595 + 1596 + static int cfe_try_fmt_meta(struct file *file, void *priv, 1597 + struct v4l2_format *f) 1598 + { 1599 + struct cfe_node *node = video_drvdata(file); 1600 + struct cfe_device *cfe = node->cfe; 1601 + 1602 + cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name); 1603 + return cfe_validate_fmt_meta(node, f); 1604 + } 1605 + 1606 + static int cfe_enum_framesizes(struct file *file, void *priv, 1607 + struct v4l2_frmsizeenum *fsize) 1608 + { 1609 + struct cfe_node *node = video_drvdata(file); 1610 + struct cfe_device *cfe = node->cfe; 1611 + const struct cfe_fmt *fmt; 1612 
+ 1613 + cfe_dbg(cfe, "%s [%s]\n", __func__, node_desc[node->id].name); 1614 + 1615 + if (fsize->index > 0) 1616 + return -EINVAL; 1617 + 1618 + /* check for valid format */ 1619 + fmt = find_format_by_pix(fsize->pixel_format); 1620 + if (!fmt) { 1621 + cfe_dbg(cfe, "Invalid pixel code: %x\n", fsize->pixel_format); 1622 + return -EINVAL; 1623 + } 1624 + 1625 + /* TODO: Do we have limits on the step_width? */ 1626 + 1627 + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; 1628 + fsize->stepwise.min_width = MIN_WIDTH; 1629 + fsize->stepwise.max_width = MAX_WIDTH; 1630 + fsize->stepwise.step_width = 2; 1631 + fsize->stepwise.min_height = MIN_HEIGHT; 1632 + fsize->stepwise.max_height = MAX_HEIGHT; 1633 + fsize->stepwise.step_height = 1; 1634 + 1635 + return 0; 1636 + } 1637 + 1638 + static int cfe_vb2_ioctl_reqbufs(struct file *file, void *priv, 1639 + struct v4l2_requestbuffers *p) 1640 + { 1641 + struct video_device *vdev = video_devdata(file); 1642 + struct cfe_node *node = video_get_drvdata(vdev); 1643 + struct cfe_device *cfe = node->cfe; 1644 + int ret; 1645 + 1646 + cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name, 1647 + p->type); 1648 + 1649 + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && 1650 + p->type != V4L2_BUF_TYPE_META_CAPTURE && 1651 + p->type != V4L2_BUF_TYPE_META_OUTPUT) 1652 + return -EINVAL; 1653 + 1654 + ret = vb2_queue_change_type(vdev->queue, p->type); 1655 + if (ret) 1656 + return ret; 1657 + 1658 + return vb2_ioctl_reqbufs(file, priv, p); 1659 + } 1660 + 1661 + static int cfe_vb2_ioctl_create_bufs(struct file *file, void *priv, 1662 + struct v4l2_create_buffers *p) 1663 + { 1664 + struct video_device *vdev = video_devdata(file); 1665 + struct cfe_node *node = video_get_drvdata(vdev); 1666 + struct cfe_device *cfe = node->cfe; 1667 + int ret; 1668 + 1669 + cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name, 1670 + p->format.type); 1671 + 1672 + if (p->format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE && 1673 + 
p->format.type != V4L2_BUF_TYPE_META_CAPTURE && 1674 + p->format.type != V4L2_BUF_TYPE_META_OUTPUT) 1675 + return -EINVAL; 1676 + 1677 + ret = vb2_queue_change_type(vdev->queue, p->format.type); 1678 + if (ret) 1679 + return ret; 1680 + 1681 + return vb2_ioctl_create_bufs(file, priv, p); 1682 + } 1683 + 1684 + static int cfe_subscribe_event(struct v4l2_fh *fh, 1685 + const struct v4l2_event_subscription *sub) 1686 + { 1687 + struct cfe_node *node = video_get_drvdata(fh->vdev); 1688 + 1689 + switch (sub->type) { 1690 + case V4L2_EVENT_FRAME_SYNC: 1691 + if (!node_supports_image_output(node)) 1692 + break; 1693 + 1694 + return v4l2_event_subscribe(fh, sub, 2, NULL); 1695 + case V4L2_EVENT_SOURCE_CHANGE: 1696 + if (!node_supports_image_output(node) && 1697 + !node_supports_meta_output(node)) 1698 + break; 1699 + 1700 + return v4l2_event_subscribe(fh, sub, 4, NULL); 1701 + } 1702 + 1703 + return v4l2_ctrl_subscribe_event(fh, sub); 1704 + } 1705 + 1706 + static const struct v4l2_ioctl_ops cfe_ioctl_ops = { 1707 + .vidioc_querycap = cfe_querycap, 1708 + .vidioc_enum_fmt_vid_cap = cfe_enum_fmt_vid_cap, 1709 + .vidioc_g_fmt_vid_cap = cfe_g_fmt, 1710 + .vidioc_s_fmt_vid_cap = cfe_s_fmt_vid_cap, 1711 + .vidioc_try_fmt_vid_cap = cfe_try_fmt_vid_cap, 1712 + 1713 + .vidioc_enum_fmt_meta_cap = cfe_enum_fmt_meta, 1714 + .vidioc_g_fmt_meta_cap = cfe_g_fmt_meta, 1715 + .vidioc_s_fmt_meta_cap = cfe_s_fmt_meta, 1716 + .vidioc_try_fmt_meta_cap = cfe_try_fmt_meta, 1717 + 1718 + .vidioc_enum_fmt_meta_out = cfe_enum_fmt_meta, 1719 + .vidioc_g_fmt_meta_out = cfe_g_fmt_meta, 1720 + .vidioc_s_fmt_meta_out = cfe_s_fmt_meta, 1721 + .vidioc_try_fmt_meta_out = cfe_try_fmt_meta, 1722 + 1723 + .vidioc_enum_framesizes = cfe_enum_framesizes, 1724 + 1725 + .vidioc_reqbufs = cfe_vb2_ioctl_reqbufs, 1726 + .vidioc_create_bufs = cfe_vb2_ioctl_create_bufs, 1727 + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, 1728 + .vidioc_querybuf = vb2_ioctl_querybuf, 1729 + .vidioc_qbuf = vb2_ioctl_qbuf, 1730 + 
.vidioc_dqbuf = vb2_ioctl_dqbuf, 1731 + .vidioc_expbuf = vb2_ioctl_expbuf, 1732 + .vidioc_streamon = vb2_ioctl_streamon, 1733 + .vidioc_streamoff = vb2_ioctl_streamoff, 1734 + 1735 + .vidioc_subscribe_event = cfe_subscribe_event, 1736 + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, 1737 + }; 1738 + 1739 + static void cfe_notify(struct v4l2_subdev *sd, unsigned int notification, 1740 + void *arg) 1741 + { 1742 + struct cfe_device *cfe = to_cfe_device(sd->v4l2_dev); 1743 + 1744 + switch (notification) { 1745 + case V4L2_DEVICE_NOTIFY_EVENT: 1746 + for (unsigned int i = 0; i < NUM_NODES; i++) { 1747 + struct cfe_node *node = &cfe->node[i]; 1748 + 1749 + if (check_state(cfe, NODE_REGISTERED, i)) 1750 + continue; 1751 + 1752 + v4l2_event_queue(&node->video_dev, arg); 1753 + } 1754 + break; 1755 + default: 1756 + break; 1757 + } 1758 + } 1759 + 1760 + /* cfe capture driver file operations */ 1761 + static const struct v4l2_file_operations cfe_fops = { 1762 + .owner = THIS_MODULE, 1763 + .open = v4l2_fh_open, 1764 + .release = vb2_fop_release, 1765 + .poll = vb2_fop_poll, 1766 + .unlocked_ioctl = video_ioctl2, 1767 + .mmap = vb2_fop_mmap, 1768 + }; 1769 + 1770 + static int cfe_video_link_validate(struct media_link *link) 1771 + { 1772 + struct video_device *vd = container_of(link->sink->entity, 1773 + struct video_device, entity); 1774 + struct cfe_node *node = container_of(vd, struct cfe_node, video_dev); 1775 + struct cfe_device *cfe = node->cfe; 1776 + struct v4l2_mbus_framefmt *source_fmt; 1777 + struct v4l2_subdev_state *state; 1778 + struct v4l2_subdev *source_sd; 1779 + int ret = 0; 1780 + 1781 + cfe_dbg(cfe, "%s: [%s] link \"%s\":%u -> \"%s\":%u\n", __func__, 1782 + node_desc[node->id].name, 1783 + link->source->entity->name, link->source->index, 1784 + link->sink->entity->name, link->sink->index); 1785 + 1786 + if (!media_entity_remote_source_pad_unique(link->sink->entity)) { 1787 + cfe_err(cfe, "video node %s pad not connected\n", vd->name); 1788 + return 
-ENOTCONN; 1789 + } 1790 + 1791 + source_sd = media_entity_to_v4l2_subdev(link->source->entity); 1792 + 1793 + state = v4l2_subdev_lock_and_get_active_state(source_sd); 1794 + 1795 + source_fmt = v4l2_subdev_state_get_format(state, link->source->index); 1796 + if (!source_fmt) { 1797 + ret = -EINVAL; 1798 + goto out; 1799 + } 1800 + 1801 + if (is_image_output_node(node)) { 1802 + struct v4l2_pix_format *pix_fmt = &node->vid_fmt.fmt.pix; 1803 + const struct cfe_fmt *fmt; 1804 + 1805 + if (source_fmt->width != pix_fmt->width || 1806 + source_fmt->height != pix_fmt->height) { 1807 + cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n", 1808 + pix_fmt->width, pix_fmt->height, 1809 + source_fmt->width, source_fmt->height); 1810 + ret = -EINVAL; 1811 + goto out; 1812 + } 1813 + 1814 + fmt = find_format_by_code_and_fourcc(source_fmt->code, 1815 + pix_fmt->pixelformat); 1816 + if (!fmt) { 1817 + cfe_err(cfe, "Format mismatch!\n"); 1818 + ret = -EINVAL; 1819 + goto out; 1820 + } 1821 + } else if (is_csi2_node(node) && is_meta_output_node(node)) { 1822 + struct v4l2_meta_format *meta_fmt = &node->meta_fmt.fmt.meta; 1823 + const struct cfe_fmt *fmt; 1824 + 1825 + if (source_fmt->width != meta_fmt->width || 1826 + source_fmt->height != meta_fmt->height) { 1827 + cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n", 1828 + meta_fmt->width, meta_fmt->height, 1829 + source_fmt->width, source_fmt->height); 1830 + ret = -EINVAL; 1831 + goto out; 1832 + } 1833 + 1834 + fmt = find_format_by_code_and_fourcc(source_fmt->code, 1835 + meta_fmt->dataformat); 1836 + if (!fmt) { 1837 + cfe_err(cfe, "Format mismatch!\n"); 1838 + ret = -EINVAL; 1839 + goto out; 1840 + } 1841 + } 1842 + 1843 + out: 1844 + v4l2_subdev_unlock_state(state); 1845 + 1846 + return ret; 1847 + } 1848 + 1849 + static const struct media_entity_operations cfe_media_entity_ops = { 1850 + .link_validate = cfe_video_link_validate, 1851 + }; 1852 + 1853 + static int 
cfe_video_link_notify(struct media_link *link, u32 flags, 1854 + unsigned int notification) 1855 + { 1856 + struct media_device *mdev = link->graph_obj.mdev; 1857 + struct cfe_device *cfe = container_of(mdev, struct cfe_device, mdev); 1858 + struct media_entity *fe = &cfe->fe.sd.entity; 1859 + struct media_entity *csi2 = &cfe->csi2.sd.entity; 1860 + unsigned long lock_flags; 1861 + 1862 + if (notification != MEDIA_DEV_NOTIFY_POST_LINK_CH) 1863 + return 0; 1864 + 1865 + cfe_dbg(cfe, "%s: %s[%u] -> %s[%u] 0x%x", __func__, 1866 + link->source->entity->name, link->source->index, 1867 + link->sink->entity->name, link->sink->index, flags); 1868 + 1869 + spin_lock_irqsave(&cfe->state_lock, lock_flags); 1870 + 1871 + for (unsigned int i = 0; i < NUM_NODES; i++) { 1872 + if (link->sink->entity != &cfe->node[i].video_dev.entity && 1873 + link->source->entity != &cfe->node[i].video_dev.entity) 1874 + continue; 1875 + 1876 + if (link->flags & MEDIA_LNK_FL_ENABLED) 1877 + set_state(cfe, NODE_ENABLED, i); 1878 + else 1879 + clear_state(cfe, NODE_ENABLED, i); 1880 + 1881 + break; 1882 + } 1883 + 1884 + spin_unlock_irqrestore(&cfe->state_lock, lock_flags); 1885 + 1886 + if (link->source->entity != csi2) 1887 + return 0; 1888 + if (link->sink->entity != fe) 1889 + return 0; 1890 + if (link->sink->index != 0) 1891 + return 0; 1892 + 1893 + cfe->fe_csi2_channel = -1; 1894 + if (link->flags & MEDIA_LNK_FL_ENABLED) { 1895 + if (link->source->index == node_desc[CSI2_CH0].link_pad) 1896 + cfe->fe_csi2_channel = CSI2_CH0; 1897 + else if (link->source->index == node_desc[CSI2_CH1].link_pad) 1898 + cfe->fe_csi2_channel = CSI2_CH1; 1899 + else if (link->source->index == node_desc[CSI2_CH2].link_pad) 1900 + cfe->fe_csi2_channel = CSI2_CH2; 1901 + else if (link->source->index == node_desc[CSI2_CH3].link_pad) 1902 + cfe->fe_csi2_channel = CSI2_CH3; 1903 + } 1904 + 1905 + if (is_fe_enabled(cfe)) 1906 + cfe_dbg(cfe, "%s: Found CSI2:%d -> FE:0 link\n", __func__, 1907 + cfe->fe_csi2_channel); 1908 
+ else 1909 + cfe_dbg(cfe, "%s: Unable to find CSI2:x -> FE:0 link\n", 1910 + __func__); 1911 + 1912 + return 0; 1913 + } 1914 + 1915 + static const struct media_device_ops cfe_media_device_ops = { 1916 + .link_notify = cfe_video_link_notify, 1917 + }; 1918 + 1919 + static void cfe_release(struct kref *kref) 1920 + { 1921 + struct cfe_device *cfe = container_of(kref, struct cfe_device, kref); 1922 + 1923 + media_device_cleanup(&cfe->mdev); 1924 + 1925 + kfree(cfe); 1926 + } 1927 + 1928 + static void cfe_put(struct cfe_device *cfe) 1929 + { 1930 + kref_put(&cfe->kref, cfe_release); 1931 + } 1932 + 1933 + static void cfe_get(struct cfe_device *cfe) 1934 + { 1935 + kref_get(&cfe->kref); 1936 + } 1937 + 1938 + static void cfe_node_release(struct video_device *vdev) 1939 + { 1940 + struct cfe_node *node = video_get_drvdata(vdev); 1941 + 1942 + cfe_put(node->cfe); 1943 + } 1944 + 1945 + static int cfe_register_node(struct cfe_device *cfe, int id) 1946 + { 1947 + struct video_device *vdev; 1948 + const struct cfe_fmt *fmt; 1949 + struct vb2_queue *q; 1950 + struct cfe_node *node = &cfe->node[id]; 1951 + int ret; 1952 + 1953 + node->cfe = cfe; 1954 + node->id = id; 1955 + 1956 + if (node_supports_image(node)) { 1957 + if (node_supports_image_output(node)) 1958 + node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1959 + else 1960 + node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; 1961 + 1962 + fmt = find_format_by_code(cfe_default_format.code); 1963 + if (!fmt) { 1964 + cfe_err(cfe, "Failed to find format code\n"); 1965 + return -EINVAL; 1966 + } 1967 + 1968 + node->vid_fmt.fmt.pix.pixelformat = fmt->fourcc; 1969 + v4l2_fill_pix_format(&node->vid_fmt.fmt.pix, 1970 + &cfe_default_format); 1971 + 1972 + ret = cfe_validate_fmt_vid_cap(node, &node->vid_fmt); 1973 + if (ret) 1974 + return ret; 1975 + } 1976 + 1977 + if (node_supports_meta(node)) { 1978 + if (node_supports_meta_output(node)) 1979 + node->meta_fmt.type = V4L2_BUF_TYPE_META_CAPTURE; 1980 + else 1981 + 
node->meta_fmt.type = V4L2_BUF_TYPE_META_OUTPUT; 1982 + 1983 + ret = cfe_validate_fmt_meta(node, &node->meta_fmt); 1984 + if (ret) 1985 + return ret; 1986 + } 1987 + 1988 + mutex_init(&node->lock); 1989 + 1990 + q = &node->buffer_queue; 1991 + q->type = node_supports_image(node) ? node->vid_fmt.type : 1992 + node->meta_fmt.type; 1993 + q->io_modes = VB2_MMAP | VB2_DMABUF; 1994 + q->drv_priv = node; 1995 + q->ops = &cfe_video_qops; 1996 + q->mem_ops = &vb2_dma_contig_memops; 1997 + q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer) 1998 + : sizeof(struct cfe_buffer); 1999 + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 2000 + q->lock = &node->lock; 2001 + q->min_queued_buffers = 1; 2002 + q->dev = &cfe->pdev->dev; 2003 + 2004 + ret = vb2_queue_init(q); 2005 + if (ret) { 2006 + cfe_err(cfe, "vb2_queue_init() failed\n"); 2007 + return ret; 2008 + } 2009 + 2010 + INIT_LIST_HEAD(&node->dma_queue); 2011 + 2012 + vdev = &node->video_dev; 2013 + vdev->release = cfe_node_release; 2014 + vdev->fops = &cfe_fops; 2015 + vdev->ioctl_ops = &cfe_ioctl_ops; 2016 + vdev->entity.ops = &cfe_media_entity_ops; 2017 + vdev->v4l2_dev = &cfe->v4l2_dev; 2018 + vdev->vfl_dir = (node_supports_image_output(node) || 2019 + node_supports_meta_output(node)) ? 
2020 + VFL_DIR_RX : 2021 + VFL_DIR_TX; 2022 + vdev->queue = q; 2023 + vdev->lock = &node->lock; 2024 + vdev->device_caps = node_desc[id].caps; 2025 + vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC; 2026 + 2027 + /* Define the device names */ 2028 + snprintf(vdev->name, sizeof(vdev->name), "%s-%s", CFE_MODULE_NAME, 2029 + node_desc[id].name); 2030 + 2031 + video_set_drvdata(vdev, node); 2032 + node->pad.flags = node_desc[id].pad_flags; 2033 + media_entity_pads_init(&vdev->entity, 1, &node->pad); 2034 + 2035 + if (!node_supports_image(node)) { 2036 + v4l2_disable_ioctl(&node->video_dev, 2037 + VIDIOC_ENUM_FRAMEINTERVALS); 2038 + v4l2_disable_ioctl(&node->video_dev, VIDIOC_ENUM_FRAMESIZES); 2039 + } 2040 + 2041 + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); 2042 + if (ret) { 2043 + cfe_err(cfe, "Unable to register video device %s\n", 2044 + vdev->name); 2045 + return ret; 2046 + } 2047 + 2048 + cfe_info(cfe, "Registered [%s] node id %d as /dev/video%u\n", 2049 + vdev->name, id, vdev->num); 2050 + 2051 + /* 2052 + * Acquire a reference to cfe, which will be released when the video 2053 + * device will be unregistered and userspace will have closed all open 2054 + * file handles. 
2055 + */ 2056 + cfe_get(cfe); 2057 + set_state(cfe, NODE_REGISTERED, id); 2058 + 2059 + return 0; 2060 + } 2061 + 2062 + static void cfe_unregister_nodes(struct cfe_device *cfe) 2063 + { 2064 + for (unsigned int i = 0; i < NUM_NODES; i++) { 2065 + struct cfe_node *node = &cfe->node[i]; 2066 + 2067 + if (check_state(cfe, NODE_REGISTERED, i)) { 2068 + clear_state(cfe, NODE_REGISTERED, i); 2069 + video_unregister_device(&node->video_dev); 2070 + } 2071 + } 2072 + } 2073 + 2074 + static int cfe_link_node_pads(struct cfe_device *cfe) 2075 + { 2076 + struct media_pad *remote_pad; 2077 + int ret; 2078 + 2079 + /* Source -> CSI2 */ 2080 + 2081 + ret = v4l2_create_fwnode_links_to_pad(cfe->source_sd, 2082 + &cfe->csi2.pad[CSI2_PAD_SINK], 2083 + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); 2084 + 2085 + if (ret) { 2086 + cfe_err(cfe, "Failed to create links to the source: %d\n", ret); 2087 + return ret; 2088 + } 2089 + 2090 + remote_pad = media_pad_remote_pad_unique(&cfe->csi2.pad[CSI2_PAD_SINK]); 2091 + if (IS_ERR(remote_pad)) { 2092 + ret = PTR_ERR(remote_pad); 2093 + cfe_err(cfe, "Failed to get unique remote source pad: %d\n", 2094 + ret); 2095 + return ret; 2096 + } 2097 + 2098 + cfe->source_pad = remote_pad->index; 2099 + 2100 + for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) { 2101 + struct cfe_node *node = &cfe->node[i]; 2102 + 2103 + if (!check_state(cfe, NODE_REGISTERED, i)) 2104 + continue; 2105 + 2106 + /* CSI2 channel # -> /dev/video# */ 2107 + ret = media_create_pad_link(&cfe->csi2.sd.entity, 2108 + node_desc[i].link_pad, 2109 + &node->video_dev.entity, 0, 0); 2110 + if (ret) 2111 + return ret; 2112 + 2113 + if (node_supports_image(node)) { 2114 + /* CSI2 channel # -> FE Input */ 2115 + ret = media_create_pad_link(&cfe->csi2.sd.entity, 2116 + node_desc[i].link_pad, 2117 + &cfe->fe.sd.entity, 2118 + FE_STREAM_PAD, 0); 2119 + if (ret) 2120 + return ret; 2121 + } 2122 + } 2123 + 2124 + for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) { 2125 + 
struct cfe_node *node = &cfe->node[i]; 2126 + struct media_entity *src, *dst; 2127 + unsigned int src_pad, dst_pad; 2128 + 2129 + if (node_desc[i].pad_flags & MEDIA_PAD_FL_SINK) { 2130 + /* FE -> /dev/video# */ 2131 + src = &cfe->fe.sd.entity; 2132 + src_pad = node_desc[i].link_pad; 2133 + dst = &node->video_dev.entity; 2134 + dst_pad = 0; 2135 + } else { 2136 + /* /dev/video# -> FE */ 2137 + dst = &cfe->fe.sd.entity; 2138 + dst_pad = node_desc[i].link_pad; 2139 + src = &node->video_dev.entity; 2140 + src_pad = 0; 2141 + } 2142 + 2143 + ret = media_create_pad_link(src, src_pad, dst, dst_pad, 0); 2144 + if (ret) 2145 + return ret; 2146 + } 2147 + 2148 + return 0; 2149 + } 2150 + 2151 + static int cfe_probe_complete(struct cfe_device *cfe) 2152 + { 2153 + int ret; 2154 + 2155 + cfe->v4l2_dev.notify = cfe_notify; 2156 + 2157 + for (unsigned int i = 0; i < NUM_NODES; i++) { 2158 + ret = cfe_register_node(cfe, i); 2159 + if (ret) { 2160 + cfe_err(cfe, "Unable to register video node %u.\n", i); 2161 + goto unregister; 2162 + } 2163 + } 2164 + 2165 + ret = cfe_link_node_pads(cfe); 2166 + if (ret) { 2167 + cfe_err(cfe, "Unable to link node pads.\n"); 2168 + goto unregister; 2169 + } 2170 + 2171 + ret = v4l2_device_register_subdev_nodes(&cfe->v4l2_dev); 2172 + if (ret) { 2173 + cfe_err(cfe, "Unable to register subdev nodes.\n"); 2174 + goto unregister; 2175 + } 2176 + 2177 + return 0; 2178 + 2179 + unregister: 2180 + cfe_unregister_nodes(cfe); 2181 + return ret; 2182 + } 2183 + 2184 + static int cfe_async_bound(struct v4l2_async_notifier *notifier, 2185 + struct v4l2_subdev *subdev, 2186 + struct v4l2_async_connection *asd) 2187 + { 2188 + struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev); 2189 + 2190 + if (cfe->source_sd) { 2191 + cfe_err(cfe, "Rejecting subdev %s (Already set!!)", 2192 + subdev->name); 2193 + return 0; 2194 + } 2195 + 2196 + cfe->source_sd = subdev; 2197 + 2198 + cfe_dbg(cfe, "Using source %s for capture\n", subdev->name); 2199 + 2200 + return 0; 
2201 + } 2202 + 2203 + static int cfe_async_complete(struct v4l2_async_notifier *notifier) 2204 + { 2205 + struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev); 2206 + 2207 + return cfe_probe_complete(cfe); 2208 + } 2209 + 2210 + static const struct v4l2_async_notifier_operations cfe_async_ops = { 2211 + .bound = cfe_async_bound, 2212 + .complete = cfe_async_complete, 2213 + }; 2214 + 2215 + static int cfe_register_async_nf(struct cfe_device *cfe) 2216 + { 2217 + struct platform_device *pdev = cfe->pdev; 2218 + struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY }; 2219 + struct fwnode_handle *local_ep_fwnode; 2220 + struct v4l2_async_connection *asd; 2221 + int ret; 2222 + 2223 + local_ep_fwnode = fwnode_graph_get_endpoint_by_id(pdev->dev.fwnode, 0, 2224 + 0, 0); 2225 + if (!local_ep_fwnode) { 2226 + cfe_err(cfe, "Failed to find local endpoint fwnode\n"); 2227 + return -ENODEV; 2228 + } 2229 + 2230 + /* Parse the local endpoint and validate its configuration. */ 2231 + ret = v4l2_fwnode_endpoint_parse(local_ep_fwnode, &ep); 2232 + if (ret) { 2233 + cfe_err(cfe, "Failed to find remote endpoint fwnode\n"); 2234 + goto err_put_local_fwnode; 2235 + } 2236 + 2237 + for (unsigned int lane = 0; lane < ep.bus.mipi_csi2.num_data_lanes; 2238 + lane++) { 2239 + if (ep.bus.mipi_csi2.data_lanes[lane] != lane + 1) { 2240 + cfe_err(cfe, "Data lanes reordering not supported\n"); 2241 + ret = -EINVAL; 2242 + goto err_put_local_fwnode; 2243 + } 2244 + } 2245 + 2246 + cfe->csi2.dphy.max_lanes = ep.bus.mipi_csi2.num_data_lanes; 2247 + cfe->csi2.bus_flags = ep.bus.mipi_csi2.flags; 2248 + 2249 + /* Initialize and register the async notifier. 
*/ 2250 + v4l2_async_nf_init(&cfe->notifier, &cfe->v4l2_dev); 2251 + cfe->notifier.ops = &cfe_async_ops; 2252 + 2253 + asd = v4l2_async_nf_add_fwnode_remote(&cfe->notifier, local_ep_fwnode, 2254 + struct v4l2_async_connection); 2255 + if (IS_ERR(asd)) { 2256 + ret = PTR_ERR(asd); 2257 + cfe_err(cfe, "Error adding subdevice: %d\n", ret); 2258 + goto err_put_local_fwnode; 2259 + } 2260 + 2261 + ret = v4l2_async_nf_register(&cfe->notifier); 2262 + if (ret) { 2263 + cfe_err(cfe, "Error registering async notifier: %d\n", ret); 2264 + goto err_nf_cleanup; 2265 + } 2266 + 2267 + fwnode_handle_put(local_ep_fwnode); 2268 + 2269 + return 0; 2270 + 2271 + err_nf_cleanup: 2272 + v4l2_async_nf_cleanup(&cfe->notifier); 2273 + err_put_local_fwnode: 2274 + fwnode_handle_put(local_ep_fwnode); 2275 + 2276 + return ret; 2277 + } 2278 + 2279 + static int cfe_probe(struct platform_device *pdev) 2280 + { 2281 + struct cfe_device *cfe; 2282 + char debugfs_name[32]; 2283 + int ret; 2284 + 2285 + cfe = kzalloc(sizeof(*cfe), GFP_KERNEL); 2286 + if (!cfe) 2287 + return -ENOMEM; 2288 + 2289 + platform_set_drvdata(pdev, cfe); 2290 + 2291 + kref_init(&cfe->kref); 2292 + cfe->pdev = pdev; 2293 + cfe->fe_csi2_channel = -1; 2294 + spin_lock_init(&cfe->state_lock); 2295 + 2296 + cfe->csi2.base = devm_platform_ioremap_resource(pdev, 0); 2297 + if (IS_ERR(cfe->csi2.base)) { 2298 + dev_err(&pdev->dev, "Failed to get dma io block\n"); 2299 + ret = PTR_ERR(cfe->csi2.base); 2300 + goto err_cfe_put; 2301 + } 2302 + 2303 + cfe->csi2.dphy.base = devm_platform_ioremap_resource(pdev, 1); 2304 + if (IS_ERR(cfe->csi2.dphy.base)) { 2305 + dev_err(&pdev->dev, "Failed to get host io block\n"); 2306 + ret = PTR_ERR(cfe->csi2.dphy.base); 2307 + goto err_cfe_put; 2308 + } 2309 + 2310 + cfe->mipi_cfg_base = devm_platform_ioremap_resource(pdev, 2); 2311 + if (IS_ERR(cfe->mipi_cfg_base)) { 2312 + dev_err(&pdev->dev, "Failed to get mipi cfg io block\n"); 2313 + ret = PTR_ERR(cfe->mipi_cfg_base); 2314 + goto err_cfe_put; 
2315 + } 2316 + 2317 + cfe->fe.base = devm_platform_ioremap_resource(pdev, 3); 2318 + if (IS_ERR(cfe->fe.base)) { 2319 + dev_err(&pdev->dev, "Failed to get pisp fe io block\n"); 2320 + ret = PTR_ERR(cfe->fe.base); 2321 + goto err_cfe_put; 2322 + } 2323 + 2324 + ret = platform_get_irq(pdev, 0); 2325 + if (ret <= 0) { 2326 + dev_err(&pdev->dev, "No IRQ resource\n"); 2327 + ret = -EINVAL; 2328 + goto err_cfe_put; 2329 + } 2330 + 2331 + ret = devm_request_irq(&pdev->dev, ret, cfe_isr, 0, "rp1-cfe", cfe); 2332 + if (ret) { 2333 + dev_err(&pdev->dev, "Unable to request interrupt\n"); 2334 + ret = -EINVAL; 2335 + goto err_cfe_put; 2336 + } 2337 + 2338 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2339 + if (ret) { 2340 + dev_err(&pdev->dev, "DMA enable failed\n"); 2341 + goto err_cfe_put; 2342 + } 2343 + 2344 + /* TODO: Enable clock only when running. */ 2345 + cfe->clk = devm_clk_get(&pdev->dev, NULL); 2346 + if (IS_ERR(cfe->clk)) 2347 + return dev_err_probe(&pdev->dev, PTR_ERR(cfe->clk), 2348 + "clock not found\n"); 2349 + 2350 + cfe->mdev.dev = &pdev->dev; 2351 + cfe->mdev.ops = &cfe_media_device_ops; 2352 + strscpy(cfe->mdev.model, CFE_MODULE_NAME, sizeof(cfe->mdev.model)); 2353 + strscpy(cfe->mdev.serial, "", sizeof(cfe->mdev.serial)); 2354 + snprintf(cfe->mdev.bus_info, sizeof(cfe->mdev.bus_info), "platform:%s", 2355 + dev_name(&pdev->dev)); 2356 + 2357 + media_device_init(&cfe->mdev); 2358 + 2359 + cfe->v4l2_dev.mdev = &cfe->mdev; 2360 + 2361 + ret = v4l2_device_register(&pdev->dev, &cfe->v4l2_dev); 2362 + if (ret) { 2363 + cfe_err(cfe, "Unable to register v4l2 device.\n"); 2364 + goto err_cfe_put; 2365 + } 2366 + 2367 + snprintf(debugfs_name, sizeof(debugfs_name), "rp1-cfe:%s", 2368 + dev_name(&pdev->dev)); 2369 + cfe->debugfs = debugfs_create_dir(debugfs_name, NULL); 2370 + debugfs_create_file("regs", 0440, cfe->debugfs, cfe, 2371 + &mipi_cfg_regs_fops); 2372 + 2373 + /* Enable the block power domain */ 2374 + pm_runtime_enable(&pdev->dev); 
2375 + 2376 + ret = pm_runtime_resume_and_get(&cfe->pdev->dev); 2377 + if (ret) 2378 + goto err_runtime_disable; 2379 + 2380 + cfe->csi2.v4l2_dev = &cfe->v4l2_dev; 2381 + ret = csi2_init(&cfe->csi2, cfe->debugfs); 2382 + if (ret) { 2383 + cfe_err(cfe, "Failed to init csi2 (%d)\n", ret); 2384 + goto err_runtime_put; 2385 + } 2386 + 2387 + cfe->fe.v4l2_dev = &cfe->v4l2_dev; 2388 + ret = pisp_fe_init(&cfe->fe, cfe->debugfs); 2389 + if (ret) { 2390 + cfe_err(cfe, "Failed to init pisp fe (%d)\n", ret); 2391 + goto err_csi2_uninit; 2392 + } 2393 + 2394 + cfe->mdev.hw_revision = cfe->fe.hw_revision; 2395 + ret = media_device_register(&cfe->mdev); 2396 + if (ret < 0) { 2397 + cfe_err(cfe, "Unable to register media-controller device.\n"); 2398 + goto err_pisp_fe_uninit; 2399 + } 2400 + 2401 + ret = cfe_register_async_nf(cfe); 2402 + if (ret) { 2403 + cfe_err(cfe, "Failed to connect subdevs\n"); 2404 + goto err_media_unregister; 2405 + } 2406 + 2407 + pm_runtime_put(&cfe->pdev->dev); 2408 + 2409 + return 0; 2410 + 2411 + err_media_unregister: 2412 + media_device_unregister(&cfe->mdev); 2413 + err_pisp_fe_uninit: 2414 + pisp_fe_uninit(&cfe->fe); 2415 + err_csi2_uninit: 2416 + csi2_uninit(&cfe->csi2); 2417 + err_runtime_put: 2418 + pm_runtime_put(&cfe->pdev->dev); 2419 + err_runtime_disable: 2420 + pm_runtime_disable(&pdev->dev); 2421 + debugfs_remove(cfe->debugfs); 2422 + v4l2_device_unregister(&cfe->v4l2_dev); 2423 + err_cfe_put: 2424 + cfe_put(cfe); 2425 + 2426 + return ret; 2427 + } 2428 + 2429 + static void cfe_remove(struct platform_device *pdev) 2430 + { 2431 + struct cfe_device *cfe = platform_get_drvdata(pdev); 2432 + 2433 + debugfs_remove(cfe->debugfs); 2434 + 2435 + v4l2_async_nf_unregister(&cfe->notifier); 2436 + v4l2_async_nf_cleanup(&cfe->notifier); 2437 + 2438 + media_device_unregister(&cfe->mdev); 2439 + cfe_unregister_nodes(cfe); 2440 + 2441 + pisp_fe_uninit(&cfe->fe); 2442 + csi2_uninit(&cfe->csi2); 2443 + 2444 + pm_runtime_disable(&pdev->dev); 2445 + 2446 + 
v4l2_device_unregister(&cfe->v4l2_dev); 2447 + 2448 + cfe_put(cfe); 2449 + } 2450 + 2451 + static int cfe_runtime_suspend(struct device *dev) 2452 + { 2453 + struct platform_device *pdev = to_platform_device(dev); 2454 + struct cfe_device *cfe = platform_get_drvdata(pdev); 2455 + 2456 + clk_disable_unprepare(cfe->clk); 2457 + 2458 + return 0; 2459 + } 2460 + 2461 + static int cfe_runtime_resume(struct device *dev) 2462 + { 2463 + struct platform_device *pdev = to_platform_device(dev); 2464 + struct cfe_device *cfe = platform_get_drvdata(pdev); 2465 + int ret; 2466 + 2467 + ret = clk_prepare_enable(cfe->clk); 2468 + if (ret) { 2469 + dev_err(dev, "Unable to enable clock\n"); 2470 + return ret; 2471 + } 2472 + 2473 + return 0; 2474 + } 2475 + 2476 + static const struct dev_pm_ops cfe_pm_ops = { 2477 + SET_RUNTIME_PM_OPS(cfe_runtime_suspend, cfe_runtime_resume, NULL) 2478 + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 2479 + pm_runtime_force_resume) 2480 + }; 2481 + 2482 + static const struct of_device_id cfe_of_match[] = { 2483 + { .compatible = "raspberrypi,rp1-cfe" }, 2484 + { /* sentinel */ }, 2485 + }; 2486 + MODULE_DEVICE_TABLE(of, cfe_of_match); 2487 + 2488 + static struct platform_driver cfe_driver = { 2489 + .probe = cfe_probe, 2490 + .remove = cfe_remove, 2491 + .driver = { 2492 + .name = CFE_MODULE_NAME, 2493 + .of_match_table = cfe_of_match, 2494 + .pm = &cfe_pm_ops, 2495 + }, 2496 + }; 2497 + 2498 + module_platform_driver(cfe_driver); 2499 + 2500 + MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>"); 2501 + MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>"); 2502 + MODULE_DESCRIPTION("Raspberry Pi RP1 Camera Front End driver"); 2503 + MODULE_LICENSE("GPL"); 2504 + MODULE_VERSION(CFE_VERSION);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.h (new file, +43 lines)
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * RP1 CFE Driver 4 + * 5 + * Copyright (c) 2021-2024 Raspberry Pi Ltd. 6 + * Copyright (c) 2023-2024 Ideas on Board Oy 7 + */ 8 + #ifndef _RP1_CFE_ 9 + #define _RP1_CFE_ 10 + 11 + #include <linux/media-bus-format.h> 12 + #include <linux/types.h> 13 + #include <linux/videodev2.h> 14 + 15 + extern bool cfe_debug_verbose; 16 + 17 + enum cfe_remap_types { 18 + CFE_REMAP_16BIT, 19 + CFE_REMAP_COMPRESSED, 20 + CFE_NUM_REMAP, 21 + }; 22 + 23 + #define CFE_FORMAT_FLAG_META_OUT BIT(0) 24 + #define CFE_FORMAT_FLAG_META_CAP BIT(1) 25 + #define CFE_FORMAT_FLAG_FE_OUT BIT(2) 26 + 27 + struct cfe_fmt { 28 + u32 fourcc; 29 + u32 code; 30 + u8 depth; 31 + u8 csi_dt; 32 + u32 remap[CFE_NUM_REMAP]; 33 + u32 flags; 34 + }; 35 + 36 + extern const struct v4l2_mbus_framefmt cfe_default_format; 37 + 38 + const struct cfe_fmt *find_format_by_code(u32 code); 39 + const struct cfe_fmt *find_format_by_pix(u32 pixelformat); 40 + u32 cfe_find_16bit_code(u32 code); 41 + u32 cfe_find_compressed_code(u32 code); 42 + 43 + #endif
+586
drivers/media/platform/raspberrypi/rp1-cfe/csi2.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * RP1 CSI-2 Driver 4 + * 5 + * Copyright (c) 2021-2024 Raspberry Pi Ltd. 6 + * Copyright (c) 2023-2024 Ideas on Board Oy 7 + */ 8 + 9 + #include <linux/delay.h> 10 + #include <linux/moduleparam.h> 11 + #include <linux/pm_runtime.h> 12 + #include <linux/seq_file.h> 13 + 14 + #include <media/videobuf2-dma-contig.h> 15 + 16 + #include "cfe.h" 17 + #include "csi2.h" 18 + 19 + #include "cfe-trace.h" 20 + 21 + static bool csi2_track_errors; 22 + module_param_named(track_csi2_errors, csi2_track_errors, bool, 0); 23 + MODULE_PARM_DESC(track_csi2_errors, "track csi-2 errors"); 24 + 25 + #define csi2_dbg(csi2, fmt, arg...) dev_dbg((csi2)->v4l2_dev->dev, fmt, ##arg) 26 + #define csi2_err(csi2, fmt, arg...) dev_err((csi2)->v4l2_dev->dev, fmt, ##arg) 27 + 28 + /* CSI2-DMA registers */ 29 + #define CSI2_STATUS 0x000 30 + #define CSI2_QOS 0x004 31 + #define CSI2_DISCARDS_OVERFLOW 0x008 32 + #define CSI2_DISCARDS_INACTIVE 0x00c 33 + #define CSI2_DISCARDS_UNMATCHED 0x010 34 + #define CSI2_DISCARDS_LEN_LIMIT 0x014 35 + 36 + #define CSI2_DISCARDS_AMOUNT_SHIFT 0 37 + #define CSI2_DISCARDS_AMOUNT_MASK GENMASK(23, 0) 38 + #define CSI2_DISCARDS_DT_SHIFT 24 39 + #define CSI2_DISCARDS_DT_MASK GENMASK(29, 24) 40 + #define CSI2_DISCARDS_VC_SHIFT 30 41 + #define CSI2_DISCARDS_VC_MASK GENMASK(31, 30) 42 + 43 + #define CSI2_LLEV_PANICS 0x018 44 + #define CSI2_ULEV_PANICS 0x01c 45 + #define CSI2_IRQ_MASK 0x020 46 + #define CSI2_IRQ_MASK_IRQ_OVERFLOW BIT(0) 47 + #define CSI2_IRQ_MASK_IRQ_DISCARD_OVERFLOW BIT(1) 48 + #define CSI2_IRQ_MASK_IRQ_DISCARD_LENGTH_LIMIT BIT(2) 49 + #define CSI2_IRQ_MASK_IRQ_DISCARD_UNMATCHED BIT(3) 50 + #define CSI2_IRQ_MASK_IRQ_DISCARD_INACTIVE BIT(4) 51 + #define CSI2_IRQ_MASK_IRQ_ALL \ 52 + (CSI2_IRQ_MASK_IRQ_OVERFLOW | CSI2_IRQ_MASK_IRQ_DISCARD_OVERFLOW | \ 53 + CSI2_IRQ_MASK_IRQ_DISCARD_LENGTH_LIMIT | \ 54 + CSI2_IRQ_MASK_IRQ_DISCARD_UNMATCHED | \ 55 + CSI2_IRQ_MASK_IRQ_DISCARD_INACTIVE) 56 + 57 + #define 
CSI2_CTRL 0x024 58 + #define CSI2_CH_CTRL(x) ((x) * 0x40 + 0x28) 59 + #define CSI2_CH_ADDR0(x) ((x) * 0x40 + 0x2c) 60 + #define CSI2_CH_ADDR1(x) ((x) * 0x40 + 0x3c) 61 + #define CSI2_CH_STRIDE(x) ((x) * 0x40 + 0x30) 62 + #define CSI2_CH_LENGTH(x) ((x) * 0x40 + 0x34) 63 + #define CSI2_CH_DEBUG(x) ((x) * 0x40 + 0x38) 64 + #define CSI2_CH_FRAME_SIZE(x) ((x) * 0x40 + 0x40) 65 + #define CSI2_CH_COMP_CTRL(x) ((x) * 0x40 + 0x44) 66 + #define CSI2_CH_FE_FRAME_ID(x) ((x) * 0x40 + 0x48) 67 + 68 + /* CSI2_STATUS */ 69 + #define CSI2_STATUS_IRQ_FS(x) (BIT(0) << (x)) 70 + #define CSI2_STATUS_IRQ_FE(x) (BIT(4) << (x)) 71 + #define CSI2_STATUS_IRQ_FE_ACK(x) (BIT(8) << (x)) 72 + #define CSI2_STATUS_IRQ_LE(x) (BIT(12) << (x)) 73 + #define CSI2_STATUS_IRQ_LE_ACK(x) (BIT(16) << (x)) 74 + #define CSI2_STATUS_IRQ_CH_MASK(x) \ 75 + (CSI2_STATUS_IRQ_FS(x) | CSI2_STATUS_IRQ_FE(x) | \ 76 + CSI2_STATUS_IRQ_FE_ACK(x) | CSI2_STATUS_IRQ_LE(x) | \ 77 + CSI2_STATUS_IRQ_LE_ACK(x)) 78 + #define CSI2_STATUS_IRQ_OVERFLOW BIT(20) 79 + #define CSI2_STATUS_IRQ_DISCARD_OVERFLOW BIT(21) 80 + #define CSI2_STATUS_IRQ_DISCARD_LEN_LIMIT BIT(22) 81 + #define CSI2_STATUS_IRQ_DISCARD_UNMATCHED BIT(23) 82 + #define CSI2_STATUS_IRQ_DISCARD_INACTIVE BIT(24) 83 + 84 + /* CSI2_CTRL */ 85 + #define CSI2_CTRL_EOP_IS_EOL BIT(0) 86 + 87 + /* CSI2_CH_CTRL */ 88 + #define CSI2_CH_CTRL_DMA_EN BIT(0) 89 + #define CSI2_CH_CTRL_FORCE BIT(3) 90 + #define CSI2_CH_CTRL_AUTO_ARM BIT(4) 91 + #define CSI2_CH_CTRL_IRQ_EN_FS BIT(13) 92 + #define CSI2_CH_CTRL_IRQ_EN_FE BIT(14) 93 + #define CSI2_CH_CTRL_IRQ_EN_FE_ACK BIT(15) 94 + #define CSI2_CH_CTRL_IRQ_EN_LE BIT(16) 95 + #define CSI2_CH_CTRL_IRQ_EN_LE_ACK BIT(17) 96 + #define CSI2_CH_CTRL_FLUSH_FE BIT(28) 97 + #define CSI2_CH_CTRL_PACK_LINE BIT(29) 98 + #define CSI2_CH_CTRL_PACK_BYTES BIT(30) 99 + #define CSI2_CH_CTRL_CH_MODE_MASK GENMASK(2, 1) 100 + #define CSI2_CH_CTRL_VC_MASK GENMASK(6, 5) 101 + #define CSI2_CH_CTRL_DT_MASK GENMASK(12, 7) 102 + #define CSI2_CH_CTRL_LC_MASK 
GENMASK(27, 18) 103 + 104 + /* CHx_COMPRESSION_CONTROL */ 105 + #define CSI2_CH_COMP_CTRL_OFFSET_MASK GENMASK(15, 0) 106 + #define CSI2_CH_COMP_CTRL_SHIFT_MASK GENMASK(19, 16) 107 + #define CSI2_CH_COMP_CTRL_MODE_MASK GENMASK(25, 24) 108 + 109 + static inline u32 csi2_reg_read(struct csi2_device *csi2, u32 offset) 110 + { 111 + return readl(csi2->base + offset); 112 + } 113 + 114 + static inline void csi2_reg_write(struct csi2_device *csi2, u32 offset, u32 val) 115 + { 116 + writel(val, csi2->base + offset); 117 + } 118 + 119 + static inline void set_field(u32 *valp, u32 field, u32 mask) 120 + { 121 + u32 val = *valp; 122 + 123 + val &= ~mask; 124 + val |= (field << __ffs(mask)) & mask; 125 + *valp = val; 126 + } 127 + 128 + static int csi2_regs_show(struct seq_file *s, void *data) 129 + { 130 + struct csi2_device *csi2 = s->private; 131 + int ret; 132 + 133 + ret = pm_runtime_resume_and_get(csi2->v4l2_dev->dev); 134 + if (ret) 135 + return ret; 136 + 137 + #define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", csi2_reg_read(csi2, reg)) 138 + #define DUMP_CH(idx, reg) seq_printf(s, #reg "(%u) \t0x%08x\n", idx, \ 139 + csi2_reg_read(csi2, reg(idx))) 140 + 141 + DUMP(CSI2_STATUS); 142 + DUMP(CSI2_DISCARDS_OVERFLOW); 143 + DUMP(CSI2_DISCARDS_INACTIVE); 144 + DUMP(CSI2_DISCARDS_UNMATCHED); 145 + DUMP(CSI2_DISCARDS_LEN_LIMIT); 146 + DUMP(CSI2_LLEV_PANICS); 147 + DUMP(CSI2_ULEV_PANICS); 148 + DUMP(CSI2_IRQ_MASK); 149 + DUMP(CSI2_CTRL); 150 + 151 + for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; ++i) { 152 + DUMP_CH(i, CSI2_CH_CTRL); 153 + DUMP_CH(i, CSI2_CH_ADDR0); 154 + DUMP_CH(i, CSI2_CH_ADDR1); 155 + DUMP_CH(i, CSI2_CH_STRIDE); 156 + DUMP_CH(i, CSI2_CH_LENGTH); 157 + DUMP_CH(i, CSI2_CH_DEBUG); 158 + DUMP_CH(i, CSI2_CH_FRAME_SIZE); 159 + DUMP_CH(i, CSI2_CH_COMP_CTRL); 160 + DUMP_CH(i, CSI2_CH_FE_FRAME_ID); 161 + } 162 + 163 + #undef DUMP 164 + #undef DUMP_CH 165 + 166 + pm_runtime_put(csi2->v4l2_dev->dev); 167 + 168 + return 0; 169 + } 170 + 171 + 
DEFINE_SHOW_ATTRIBUTE(csi2_regs); 172 + 173 + static int csi2_errors_show(struct seq_file *s, void *data) 174 + { 175 + struct csi2_device *csi2 = s->private; 176 + unsigned long flags; 177 + u32 discards_table[DISCARDS_TABLE_NUM_VCS][DISCARDS_TABLE_NUM_ENTRIES]; 178 + u32 discards_dt_table[DISCARDS_TABLE_NUM_ENTRIES]; 179 + u32 overflows; 180 + 181 + spin_lock_irqsave(&csi2->errors_lock, flags); 182 + 183 + memcpy(discards_table, csi2->discards_table, sizeof(discards_table)); 184 + memcpy(discards_dt_table, csi2->discards_dt_table, 185 + sizeof(discards_dt_table)); 186 + overflows = csi2->overflows; 187 + 188 + csi2->overflows = 0; 189 + memset(csi2->discards_table, 0, sizeof(discards_table)); 190 + memset(csi2->discards_dt_table, 0, sizeof(discards_dt_table)); 191 + 192 + spin_unlock_irqrestore(&csi2->errors_lock, flags); 193 + 194 + seq_printf(s, "Overflows %u\n", overflows); 195 + seq_puts(s, "Discards:\n"); 196 + seq_puts(s, "VC OVLF LEN UNMATCHED INACTIVE\n"); 197 + 198 + for (unsigned int vc = 0; vc < DISCARDS_TABLE_NUM_VCS; ++vc) { 199 + seq_printf(s, "%u %10u %10u %10u %10u\n", vc, 200 + discards_table[vc][DISCARDS_TABLE_OVERFLOW], 201 + discards_table[vc][DISCARDS_TABLE_LENGTH_LIMIT], 202 + discards_table[vc][DISCARDS_TABLE_UNMATCHED], 203 + discards_table[vc][DISCARDS_TABLE_INACTIVE]); 204 + } 205 + 206 + seq_printf(s, "Last DT %10u %10u %10u %10u\n", 207 + discards_dt_table[DISCARDS_TABLE_OVERFLOW], 208 + discards_dt_table[DISCARDS_TABLE_LENGTH_LIMIT], 209 + discards_dt_table[DISCARDS_TABLE_UNMATCHED], 210 + discards_dt_table[DISCARDS_TABLE_INACTIVE]); 211 + 212 + return 0; 213 + } 214 + 215 + DEFINE_SHOW_ATTRIBUTE(csi2_errors); 216 + 217 + static void csi2_isr_handle_errors(struct csi2_device *csi2, u32 status) 218 + { 219 + spin_lock(&csi2->errors_lock); 220 + 221 + if (status & CSI2_STATUS_IRQ_OVERFLOW) 222 + csi2->overflows++; 223 + 224 + for (unsigned int i = 0; i < DISCARDS_TABLE_NUM_ENTRIES; ++i) { 225 + static const u32 discard_bits[] = { 226 + 
CSI2_STATUS_IRQ_DISCARD_OVERFLOW, 227 + CSI2_STATUS_IRQ_DISCARD_LEN_LIMIT, 228 + CSI2_STATUS_IRQ_DISCARD_UNMATCHED, 229 + CSI2_STATUS_IRQ_DISCARD_INACTIVE, 230 + }; 231 + static const u8 discard_regs[] = { 232 + CSI2_DISCARDS_OVERFLOW, 233 + CSI2_DISCARDS_LEN_LIMIT, 234 + CSI2_DISCARDS_UNMATCHED, 235 + CSI2_DISCARDS_INACTIVE, 236 + }; 237 + u32 amount; 238 + u8 dt, vc; 239 + u32 v; 240 + 241 + if (!(status & discard_bits[i])) 242 + continue; 243 + 244 + v = csi2_reg_read(csi2, discard_regs[i]); 245 + csi2_reg_write(csi2, discard_regs[i], 0); 246 + 247 + amount = (v & CSI2_DISCARDS_AMOUNT_MASK) >> 248 + CSI2_DISCARDS_AMOUNT_SHIFT; 249 + dt = (v & CSI2_DISCARDS_DT_MASK) >> CSI2_DISCARDS_DT_SHIFT; 250 + vc = (v & CSI2_DISCARDS_VC_MASK) >> CSI2_DISCARDS_VC_SHIFT; 251 + 252 + csi2->discards_table[vc][i] += amount; 253 + csi2->discards_dt_table[i] = dt; 254 + } 255 + 256 + spin_unlock(&csi2->errors_lock); 257 + } 258 + 259 + void csi2_isr(struct csi2_device *csi2, bool *sof, bool *eof) 260 + { 261 + u32 status; 262 + 263 + status = csi2_reg_read(csi2, CSI2_STATUS); 264 + 265 + /* Write value back to clear the interrupts */ 266 + csi2_reg_write(csi2, CSI2_STATUS, status); 267 + 268 + for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) { 269 + u32 dbg; 270 + 271 + if ((status & CSI2_STATUS_IRQ_CH_MASK(i)) == 0) 272 + continue; 273 + 274 + dbg = csi2_reg_read(csi2, CSI2_CH_DEBUG(i)); 275 + 276 + trace_csi2_irq(i, status, dbg); 277 + 278 + sof[i] = !!(status & CSI2_STATUS_IRQ_FS(i)); 279 + eof[i] = !!(status & CSI2_STATUS_IRQ_FE_ACK(i)); 280 + } 281 + 282 + if (csi2_track_errors) 283 + csi2_isr_handle_errors(csi2, status); 284 + } 285 + 286 + void csi2_set_buffer(struct csi2_device *csi2, unsigned int channel, 287 + dma_addr_t dmaaddr, unsigned int stride, unsigned int size) 288 + { 289 + u64 addr = dmaaddr; 290 + /* 291 + * ADDRESS0 must be written last as it triggers the double buffering 292 + * mechanism for all buffer registers within the hardware. 
293 + */ 294 + addr >>= 4; 295 + csi2_reg_write(csi2, CSI2_CH_LENGTH(channel), size >> 4); 296 + csi2_reg_write(csi2, CSI2_CH_STRIDE(channel), stride >> 4); 297 + csi2_reg_write(csi2, CSI2_CH_ADDR1(channel), addr >> 32); 298 + csi2_reg_write(csi2, CSI2_CH_ADDR0(channel), addr & 0xffffffff); 299 + } 300 + 301 + void csi2_set_compression(struct csi2_device *csi2, unsigned int channel, 302 + enum csi2_compression_mode mode, unsigned int shift, 303 + unsigned int offset) 304 + { 305 + u32 compression = 0; 306 + 307 + set_field(&compression, CSI2_CH_COMP_CTRL_OFFSET_MASK, offset); 308 + set_field(&compression, CSI2_CH_COMP_CTRL_SHIFT_MASK, shift); 309 + set_field(&compression, CSI2_CH_COMP_CTRL_MODE_MASK, mode); 310 + csi2_reg_write(csi2, CSI2_CH_COMP_CTRL(channel), compression); 311 + } 312 + 313 + void csi2_start_channel(struct csi2_device *csi2, unsigned int channel, 314 + enum csi2_mode mode, bool auto_arm, bool pack_bytes, 315 + unsigned int width, unsigned int height, 316 + u8 vc, u8 dt) 317 + { 318 + u32 ctrl; 319 + 320 + csi2_dbg(csi2, "%s [%u]\n", __func__, channel); 321 + 322 + csi2_reg_write(csi2, CSI2_CH_CTRL(channel), 0); 323 + csi2_reg_write(csi2, CSI2_CH_DEBUG(channel), 0); 324 + csi2_reg_write(csi2, CSI2_STATUS, CSI2_STATUS_IRQ_CH_MASK(channel)); 325 + 326 + /* Enable channel and FS/FE interrupts. */ 327 + ctrl = CSI2_CH_CTRL_DMA_EN | CSI2_CH_CTRL_IRQ_EN_FS | 328 + CSI2_CH_CTRL_IRQ_EN_FE_ACK | CSI2_CH_CTRL_PACK_LINE; 329 + /* PACK_BYTES ensures no striding for embedded data. 
*/ 330 + if (pack_bytes) 331 + ctrl |= CSI2_CH_CTRL_PACK_BYTES; 332 + 333 + if (auto_arm) 334 + ctrl |= CSI2_CH_CTRL_AUTO_ARM; 335 + 336 + if (width && height) { 337 + set_field(&ctrl, mode, CSI2_CH_CTRL_CH_MODE_MASK); 338 + csi2_reg_write(csi2, CSI2_CH_FRAME_SIZE(channel), 339 + (height << 16) | width); 340 + } else { 341 + set_field(&ctrl, 0x0, CSI2_CH_CTRL_CH_MODE_MASK); 342 + csi2_reg_write(csi2, CSI2_CH_FRAME_SIZE(channel), 0); 343 + } 344 + 345 + set_field(&ctrl, vc, CSI2_CH_CTRL_VC_MASK); 346 + set_field(&ctrl, dt, CSI2_CH_CTRL_DT_MASK); 347 + csi2_reg_write(csi2, CSI2_CH_CTRL(channel), ctrl); 348 + csi2->num_lines[channel] = height; 349 + } 350 + 351 + void csi2_stop_channel(struct csi2_device *csi2, unsigned int channel) 352 + { 353 + csi2_dbg(csi2, "%s [%u]\n", __func__, channel); 354 + 355 + /* Channel disable. Use FORCE to allow stopping mid-frame. */ 356 + csi2_reg_write(csi2, CSI2_CH_CTRL(channel), CSI2_CH_CTRL_FORCE); 357 + /* Latch the above change by writing to the ADDR0 register. */ 358 + csi2_reg_write(csi2, CSI2_CH_ADDR0(channel), 0); 359 + /* Write this again, the HW needs it! */ 360 + csi2_reg_write(csi2, CSI2_CH_ADDR0(channel), 0); 361 + } 362 + 363 + void csi2_open_rx(struct csi2_device *csi2) 364 + { 365 + csi2_reg_write(csi2, CSI2_IRQ_MASK, 366 + csi2_track_errors ? 
CSI2_IRQ_MASK_IRQ_ALL : 0); 367 + 368 + dphy_start(&csi2->dphy); 369 + 370 + csi2_reg_write(csi2, CSI2_CTRL, CSI2_CTRL_EOP_IS_EOL); 371 + } 372 + 373 + void csi2_close_rx(struct csi2_device *csi2) 374 + { 375 + dphy_stop(&csi2->dphy); 376 + 377 + csi2_reg_write(csi2, CSI2_IRQ_MASK, 0); 378 + } 379 + 380 + static int csi2_init_state(struct v4l2_subdev *sd, 381 + struct v4l2_subdev_state *state) 382 + { 383 + struct v4l2_subdev_route routes[] = { { 384 + .sink_pad = CSI2_PAD_SINK, 385 + .sink_stream = 0, 386 + .source_pad = CSI2_PAD_FIRST_SOURCE, 387 + .source_stream = 0, 388 + .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, 389 + } }; 390 + 391 + struct v4l2_subdev_krouting routing = { 392 + .num_routes = ARRAY_SIZE(routes), 393 + .routes = routes, 394 + }; 395 + 396 + int ret; 397 + 398 + ret = v4l2_subdev_set_routing_with_fmt(sd, state, &routing, 399 + &cfe_default_format); 400 + if (ret) 401 + return ret; 402 + 403 + return 0; 404 + } 405 + 406 + static int csi2_pad_set_fmt(struct v4l2_subdev *sd, 407 + struct v4l2_subdev_state *state, 408 + struct v4l2_subdev_format *format) 409 + { 410 + if (format->pad == CSI2_PAD_SINK) { 411 + /* Store the sink format and propagate it to the source. */ 412 + 413 + const struct cfe_fmt *cfe_fmt; 414 + 415 + cfe_fmt = find_format_by_code(format->format.code); 416 + if (!cfe_fmt) { 417 + cfe_fmt = find_format_by_code(MEDIA_BUS_FMT_SRGGB10_1X10); 418 + format->format.code = cfe_fmt->code; 419 + } 420 + 421 + struct v4l2_mbus_framefmt *fmt; 422 + 423 + fmt = v4l2_subdev_state_get_format(state, format->pad, 424 + format->stream); 425 + if (!fmt) 426 + return -EINVAL; 427 + 428 + *fmt = format->format; 429 + 430 + fmt = v4l2_subdev_state_get_opposite_stream_format(state, 431 + format->pad, 432 + format->stream); 433 + if (!fmt) 434 + return -EINVAL; 435 + 436 + format->format.field = V4L2_FIELD_NONE; 437 + 438 + *fmt = format->format; 439 + } else { 440 + /* Only allow changing the source pad mbus code. 
*/ 441 + 442 + struct v4l2_mbus_framefmt *sink_fmt, *source_fmt; 443 + u32 sink_code; 444 + u32 code; 445 + 446 + sink_fmt = v4l2_subdev_state_get_opposite_stream_format(state, 447 + format->pad, 448 + format->stream); 449 + if (!sink_fmt) 450 + return -EINVAL; 451 + 452 + source_fmt = v4l2_subdev_state_get_format(state, format->pad, 453 + format->stream); 454 + if (!source_fmt) 455 + return -EINVAL; 456 + 457 + sink_code = sink_fmt->code; 458 + code = format->format.code; 459 + 460 + /* 461 + * Only allow changing the mbus code to: 462 + * - The sink's mbus code 463 + * - The 16-bit version of the sink's mbus code 464 + * - The compressed version of the sink's mbus code 465 + */ 466 + if (code == sink_code || 467 + code == cfe_find_16bit_code(sink_code) || 468 + code == cfe_find_compressed_code(sink_code)) 469 + source_fmt->code = code; 470 + 471 + format->format.code = source_fmt->code; 472 + } 473 + 474 + return 0; 475 + } 476 + 477 + static int csi2_set_routing(struct v4l2_subdev *sd, 478 + struct v4l2_subdev_state *state, 479 + enum v4l2_subdev_format_whence which, 480 + struct v4l2_subdev_krouting *routing) 481 + { 482 + int ret; 483 + 484 + ret = v4l2_subdev_routing_validate(sd, routing, 485 + V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 | 486 + V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING); 487 + if (ret) 488 + return ret; 489 + 490 + /* Only stream ID 0 allowed on source pads */ 491 + for (unsigned int i = 0; i < routing->num_routes; ++i) { 492 + const struct v4l2_subdev_route *route = &routing->routes[i]; 493 + 494 + if (route->source_stream != 0) 495 + return -EINVAL; 496 + } 497 + 498 + ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, 499 + &cfe_default_format); 500 + if (ret) 501 + return ret; 502 + 503 + return 0; 504 + } 505 + 506 + static const struct v4l2_subdev_pad_ops csi2_subdev_pad_ops = { 507 + .get_fmt = v4l2_subdev_get_fmt, 508 + .set_fmt = csi2_pad_set_fmt, 509 + .set_routing = csi2_set_routing, 510 + .link_validate = 
v4l2_subdev_link_validate_default, 511 + }; 512 + 513 + static const struct media_entity_operations csi2_entity_ops = { 514 + .link_validate = v4l2_subdev_link_validate, 515 + .has_pad_interdep = v4l2_subdev_has_pad_interdep, 516 + }; 517 + 518 + static const struct v4l2_subdev_ops csi2_subdev_ops = { 519 + .pad = &csi2_subdev_pad_ops, 520 + }; 521 + 522 + static const struct v4l2_subdev_internal_ops csi2_internal_ops = { 523 + .init_state = csi2_init_state, 524 + }; 525 + 526 + int csi2_init(struct csi2_device *csi2, struct dentry *debugfs) 527 + { 528 + unsigned int ret; 529 + 530 + spin_lock_init(&csi2->errors_lock); 531 + 532 + csi2->dphy.dev = csi2->v4l2_dev->dev; 533 + dphy_probe(&csi2->dphy); 534 + 535 + debugfs_create_file("csi2_regs", 0440, debugfs, csi2, &csi2_regs_fops); 536 + 537 + if (csi2_track_errors) 538 + debugfs_create_file("csi2_errors", 0440, debugfs, csi2, 539 + &csi2_errors_fops); 540 + 541 + csi2->pad[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK; 542 + 543 + for (unsigned int i = CSI2_PAD_FIRST_SOURCE; 544 + i < CSI2_PAD_FIRST_SOURCE + CSI2_PAD_NUM_SOURCES; i++) 545 + csi2->pad[i].flags = MEDIA_PAD_FL_SOURCE; 546 + 547 + ret = media_entity_pads_init(&csi2->sd.entity, ARRAY_SIZE(csi2->pad), 548 + csi2->pad); 549 + if (ret) 550 + return ret; 551 + 552 + /* Initialize subdev */ 553 + v4l2_subdev_init(&csi2->sd, &csi2_subdev_ops); 554 + csi2->sd.internal_ops = &csi2_internal_ops; 555 + csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; 556 + csi2->sd.entity.ops = &csi2_entity_ops; 557 + csi2->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS; 558 + csi2->sd.owner = THIS_MODULE; 559 + snprintf(csi2->sd.name, sizeof(csi2->sd.name), "csi2"); 560 + 561 + ret = v4l2_subdev_init_finalize(&csi2->sd); 562 + if (ret) 563 + goto err_entity_cleanup; 564 + 565 + ret = v4l2_device_register_subdev(csi2->v4l2_dev, &csi2->sd); 566 + if (ret) { 567 + csi2_err(csi2, "Failed register csi2 subdev (%d)\n", ret); 568 + goto err_subdev_cleanup; 569 + } 570 
+ 571 + return 0; 572 + 573 + err_subdev_cleanup: 574 + v4l2_subdev_cleanup(&csi2->sd); 575 + err_entity_cleanup: 576 + media_entity_cleanup(&csi2->sd.entity); 577 + 578 + return ret; 579 + } 580 + 581 + void csi2_uninit(struct csi2_device *csi2) 582 + { 583 + v4l2_device_unregister_subdev(&csi2->sd); 584 + v4l2_subdev_cleanup(&csi2->sd); 585 + media_entity_cleanup(&csi2->sd.entity); 586 + }
+89
drivers/media/platform/raspberrypi/rp1-cfe/csi2.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RP1 CSI-2 Driver
 *
 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
 * Copyright (c) 2023-2024 Ideas on Board Oy
 */

#ifndef _RP1_CSI2_
#define _RP1_CSI2_

#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/types.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

#include "dphy.h"

/* The CSI2-DMA block has four independent DMA channels. */
#define CSI2_NUM_CHANNELS 4

/* Pad layout: one sink, four sources (one per DMA channel). */
#define CSI2_PAD_SINK 0
#define CSI2_PAD_FIRST_SOURCE 1
#define CSI2_PAD_NUM_SOURCES 4
#define CSI2_NUM_PADS 5

#define DISCARDS_TABLE_NUM_VCS 4

/* Channel mode written into CSI2_CH_CTRL's CH_MODE field. */
enum csi2_mode {
	CSI2_MODE_NORMAL = 0,
	CSI2_MODE_REMAP = 1,
	CSI2_MODE_COMPRESSED = 2,
	CSI2_MODE_FE_STREAMING = 3,
};

/* Compression mode written into CHx_COMPRESSION_CONTROL's MODE field. */
enum csi2_compression_mode {
	CSI2_COMPRESSION_DELTA = 1,
	CSI2_COMPRESSION_SIMPLE = 2,
	CSI2_COMPRESSION_COMBINED = 3,
};

/* Row index into the per-VC discard statistics table. */
enum discards_table_index {
	DISCARDS_TABLE_OVERFLOW = 0,
	DISCARDS_TABLE_LENGTH_LIMIT,
	DISCARDS_TABLE_UNMATCHED,
	DISCARDS_TABLE_INACTIVE,
	DISCARDS_TABLE_NUM_ENTRIES,
};

struct csi2_device {
	/* Parent V4l2 device */
	struct v4l2_device *v4l2_dev;

	/* Memory-mapped CSI2-DMA register base. */
	void __iomem *base;

	struct dphy_data dphy;

	enum v4l2_mbus_type bus_type;
	unsigned int bus_flags;
	/* Frame height programmed per channel by csi2_start_channel(). */
	unsigned int num_lines[CSI2_NUM_CHANNELS];

	struct media_pad pad[CSI2_NUM_PADS];
	struct v4l2_subdev sd;

	/* lock for csi2 errors counters */
	spinlock_t errors_lock;
	u32 overflows;
	u32 discards_table[DISCARDS_TABLE_NUM_VCS][DISCARDS_TABLE_NUM_ENTRIES];
	u32 discards_dt_table[DISCARDS_TABLE_NUM_ENTRIES];
};

void csi2_isr(struct csi2_device *csi2, bool *sof, bool *eof);
void csi2_set_buffer(struct csi2_device *csi2, unsigned int channel,
		     dma_addr_t dmaaddr, unsigned int stride,
		     unsigned int size);
void csi2_set_compression(struct csi2_device *csi2, unsigned int channel,
			  enum csi2_compression_mode mode, unsigned int shift,
			  unsigned int offset);
void csi2_start_channel(struct csi2_device *csi2, unsigned int channel,
			enum csi2_mode mode, bool auto_arm,
			bool pack_bytes, unsigned int width,
			unsigned int height, u8 vc, u8 dt);
void csi2_stop_channel(struct csi2_device *csi2, unsigned int channel);
void csi2_open_rx(struct csi2_device *csi2);
void csi2_close_rx(struct csi2_device *csi2);
int csi2_init(struct csi2_device *csi2, struct dentry *debugfs);
void csi2_uninit(struct csi2_device *csi2);

#endif
+181
drivers/media/platform/raspberrypi/rp1-cfe/dphy.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RP1 CSI-2 Driver
 *
 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
 * Copyright (c) 2023-2024 Ideas on Board Oy
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "dphy.h"

#define dphy_dbg(dphy, fmt, arg...) dev_dbg((dphy)->dev, fmt, ##arg)
#define dphy_err(dphy, fmt, arg...) dev_err((dphy)->dev, fmt, ##arg)

/* DW dphy Host registers */
#define DPHY_VERSION 0x000
#define DPHY_N_LANES 0x004
#define DPHY_RESETN 0x008
#define DPHY_PHY_SHUTDOWNZ 0x040
#define DPHY_PHY_RSTZ 0x044
#define DPHY_PHY_RX 0x048
#define DPHY_PHY_STOPSTATE 0x04c
#define DPHY_PHY_TST_CTRL0 0x050
#define DPHY_PHY_TST_CTRL1 0x054
#define DPHY_PHY2_TST_CTRL0 0x058
#define DPHY_PHY2_TST_CTRL1 0x05c

/* DW dphy Host Transactions */
#define DPHY_HS_RX_CTRL_LANE0_OFFSET 0x44
#define DPHY_PLL_INPUT_DIV_OFFSET 0x17
#define DPHY_PLL_LOOP_DIV_OFFSET 0x18
#define DPHY_PLL_DIV_CTRL_OFFSET 0x19

static u32 dw_csi2_host_read(struct dphy_data *dphy, u32 offset)
{
	return readl(dphy->base + offset);
}

static void dw_csi2_host_write(struct dphy_data *dphy, u32 offset, u32 data)
{
	writel(data, dphy->base + offset);
}

/* Set/clear TSTCLR (bit 0 of TST_CTRL0): resets the PHY test interface. */
static void set_tstclr(struct dphy_data *dphy, u32 val)
{
	u32 ctrl0 = dw_csi2_host_read(dphy, DPHY_PHY_TST_CTRL0);

	dw_csi2_host_write(dphy, DPHY_PHY_TST_CTRL0, (ctrl0 & ~1) | val);
}

/* Set/clear TSTCLK (bit 1 of TST_CTRL0): clocks the test interface. */
static void set_tstclk(struct dphy_data *dphy, u32 val)
{
	u32 ctrl0 = dw_csi2_host_read(dphy, DPHY_PHY_TST_CTRL0);

	dw_csi2_host_write(dphy, DPHY_PHY_TST_CTRL0, (ctrl0 & ~2) | (val << 1));
}

/* Read TESTDOUT (bits 15:8 of TST_CTRL1). */
static uint8_t get_tstdout(struct dphy_data *dphy)
{
	u32 ctrl1 = dw_csi2_host_read(dphy, DPHY_PHY_TST_CTRL1);

	return ((ctrl1 >> 8) & 0xff);
}

/* Set/clear TESTEN (bit 16 of TST_CTRL1). */
static void set_testen(struct dphy_data *dphy, u32 val)
{
	u32 ctrl1 = dw_csi2_host_read(dphy, DPHY_PHY_TST_CTRL1);

	dw_csi2_host_write(dphy, DPHY_PHY_TST_CTRL1,
			   (ctrl1 & ~(1 << 16)) | (val << 16));
}

/* Write TESTDIN (bits 7:0 of TST_CTRL1). */
static void set_testdin(struct dphy_data *dphy, u32 val)
{
	u32 ctrl1 = dw_csi2_host_read(dphy, DPHY_PHY_TST_CTRL1);

	dw_csi2_host_write(dphy, DPHY_PHY_TST_CTRL1, (ctrl1 & ~0xff) | val);
}

/*
 * Perform one write transaction on the DW PHY test interface: latch
 * @test_code, then clock in @test_data, and return the test output byte.
 * The exact toggle sequence follows the databook and must not be reordered.
 */
static uint8_t dphy_transaction(struct dphy_data *dphy, u8 test_code,
				uint8_t test_data)
{
	/* See page 101 of the MIPI DPHY databook. */
	set_tstclk(dphy, 1);
	set_testen(dphy, 0);
	set_testdin(dphy, test_code);
	set_testen(dphy, 1);
	set_tstclk(dphy, 0);
	set_testen(dphy, 0);
	set_testdin(dphy, test_data);
	set_tstclk(dphy, 1);
	return get_tstdout(dphy);
}

/*
 * Program the PHY hsfreqrange field for the given link rate in Mbps.
 * Out-of-range rates are reported but still clamped to the nearest table
 * entry so the PHY gets a defined setting.
 */
static void dphy_set_hsfreqrange(struct dphy_data *dphy, uint32_t mbps)
{
	/* See Table 5-1 on page 65 of dphy databook */
	static const u16 hsfreqrange_table[][2] = {
		{ 89, 0b000000 },   { 99, 0b010000 },   { 109, 0b100000 },
		{ 129, 0b000001 },  { 139, 0b010001 },  { 149, 0b100001 },
		{ 169, 0b000010 },  { 179, 0b010010 },  { 199, 0b100010 },
		{ 219, 0b000011 },  { 239, 0b010011 },  { 249, 0b100011 },
		{ 269, 0b000100 },  { 299, 0b010100 },  { 329, 0b000101 },
		{ 359, 0b010101 },  { 399, 0b100101 },  { 449, 0b000110 },
		{ 499, 0b010110 },  { 549, 0b000111 },  { 599, 0b010111 },
		{ 649, 0b001000 },  { 699, 0b011000 },  { 749, 0b001001 },
		{ 799, 0b011001 },  { 849, 0b101001 },  { 899, 0b111001 },
		{ 949, 0b001010 },  { 999, 0b011010 },  { 1049, 0b101010 },
		{ 1099, 0b111010 }, { 1149, 0b001011 }, { 1199, 0b011011 },
		{ 1249, 0b101011 }, { 1299, 0b111011 }, { 1349, 0b001100 },
		{ 1399, 0b011100 }, { 1449, 0b101100 }, { 1500, 0b111100 },
	};
	unsigned int i;

	if (mbps < 80 || mbps > 1500)
		dphy_err(dphy, "DPHY: Datarate %u Mbps out of range\n", mbps);

	/* First entry whose upper bound covers @mbps (last entry otherwise). */
	for (i = 0; i < ARRAY_SIZE(hsfreqrange_table) - 1; i++) {
		if (mbps <= hsfreqrange_table[i][0])
			break;
	}

	dphy_transaction(dphy, DPHY_HS_RX_CTRL_LANE0_OFFSET,
			 hsfreqrange_table[i][1] << 1);
}

/*
 * Databook bring-up sequence: hold the PHY in reset/shutdown, pulse the
 * test-interface clear, program hsfreqrange, then release shutdown and
 * reset with the prescribed delays. Order and delays are hardware-mandated.
 */
static void dphy_init(struct dphy_data *dphy)
{
	dw_csi2_host_write(dphy, DPHY_PHY_RSTZ, 0);
	dw_csi2_host_write(dphy, DPHY_PHY_SHUTDOWNZ, 0);
	set_tstclk(dphy, 1);
	set_testen(dphy, 0);
	set_tstclr(dphy, 1);
	usleep_range(15, 20);
	set_tstclr(dphy, 0);
	usleep_range(15, 20);

	dphy_set_hsfreqrange(dphy, dphy->dphy_rate);

	usleep_range(5, 10);
	dw_csi2_host_write(dphy, DPHY_PHY_SHUTDOWNZ, 1);
	usleep_range(5, 10);
	dw_csi2_host_write(dphy, DPHY_PHY_RSTZ, 1);
}

/* Start the D-PHY with the lane count and rate held in @dphy. */
void dphy_start(struct dphy_data *dphy)
{
	dphy_dbg(dphy, "%s: Link rate %u Mbps, %u data lanes\n", __func__,
		 dphy->dphy_rate, dphy->active_lanes);

	/* N_LANES is encoded as (number of lanes - 1). */
	dw_csi2_host_write(dphy, DPHY_N_LANES, (dphy->active_lanes - 1));
	dphy_init(dphy);
	dw_csi2_host_write(dphy, DPHY_RESETN, 0xffffffff);
	usleep_range(10, 50);
}

/* Stop the D-PHY and hold the host controller in reset. */
void dphy_stop(struct dphy_data *dphy)
{
	dphy_dbg(dphy, "%s\n", __func__);

	/* Set only one lane (lane 0) as active (ON) */
	dw_csi2_host_write(dphy, DPHY_N_LANES, 0);
	dw_csi2_host_write(dphy, DPHY_RESETN, 0);
}

/*
 * Read and log the DW CSI-2 host IP version. The VERSION register holds
 * ASCII digits in bytes 3..1, hence the '- '0'' conversions.
 */
void dphy_probe(struct dphy_data *dphy)
{
	u32 host_ver;
	u8 host_ver_major, host_ver_minor;

	host_ver = dw_csi2_host_read(dphy, DPHY_VERSION);
	host_ver_major = (u8)((host_ver >> 24) - '0');
	host_ver_minor = (u8)((host_ver >> 16) - '0');
	host_ver_minor = host_ver_minor * 10;
	host_ver_minor += (u8)((host_ver >> 8) - '0');

	dphy_dbg(dphy, "DW dphy Host HW v%u.%u\n", host_ver_major,
		 host_ver_minor);
}
+27
drivers/media/platform/raspberrypi/rp1-cfe/dphy.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
 * Copyright (c) 2023-2024 Ideas on Board Oy
 */

#ifndef _RP1_DPHY_
#define _RP1_DPHY_

#include <linux/io.h>
#include <linux/types.h>

/* State of one DW MIPI D-PHY instance, embedded in struct csi2_device. */
struct dphy_data {
	struct device *dev;

	/* Memory-mapped DW CSI-2 host register base. */
	void __iomem *base;

	/* Per-lane link rate in Mbps, set by the owner before dphy_start(). */
	u32 dphy_rate;
	u32 max_lanes;
	u32 active_lanes;
};

void dphy_probe(struct dphy_data *dphy);
void dphy_start(struct dphy_data *dphy);
void dphy_stop(struct dphy_data *dphy);

#endif
+605
drivers/media/platform/raspberrypi/rp1-cfe/pisp-fe.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PiSP Front End Driver
 *
 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>

#include <media/videobuf2-dma-contig.h>

#include "cfe.h"
#include "pisp-fe.h"

#include "cfe-trace.h"

/* FE register map. */
#define FE_VERSION 0x000
#define FE_CONTROL 0x004
#define FE_STATUS 0x008
#define FE_FRAME_STATUS 0x00c
#define FE_ERROR_STATUS 0x010
#define FE_OUTPUT_STATUS 0x014
#define FE_INT_EN 0x018
#define FE_INT_STATUS 0x01c

/* CONTROL */
#define FE_CONTROL_QUEUE BIT(0)
#define FE_CONTROL_ABORT BIT(1)
#define FE_CONTROL_RESET BIT(2)
#define FE_CONTROL_LATCH_REGS BIT(3)

/* INT_EN / INT_STATUS */
#define FE_INT_EOF BIT(0)
#define FE_INT_SOF BIT(1)
#define FE_INT_LINES0 BIT(8)
#define FE_INT_LINES1 BIT(9)
#define FE_INT_STATS BIT(16)
#define FE_INT_QREADY BIT(24)

/* STATUS */
#define FE_STATUS_QUEUED BIT(0)
#define FE_STATUS_WAITING BIT(1)
#define FE_STATUS_ACTIVE BIT(2)

/* Offset of the memory-mapped pisp_fe_config area within the FE block. */
#define PISP_FE_CONFIG_BASE_OFFSET 0x0040

/* All statistics-related enable bits grouped together. */
#define PISP_FE_ENABLE_STATS_CLUSTER \
	(PISP_FE_ENABLE_STATS_CROP | PISP_FE_ENABLE_DECIMATE | \
	 PISP_FE_ENABLE_BLC | PISP_FE_ENABLE_CDAF_STATS | \
	 PISP_FE_ENABLE_AWB_STATS | PISP_FE_ENABLE_RGBY | \
	 PISP_FE_ENABLE_LSC | PISP_FE_ENABLE_AGC_STATS)

/* Output-channel enable bits for channel @i (each channel 4 bits apart). */
#define PISP_FE_ENABLE_OUTPUT_CLUSTER(i) \
	((PISP_FE_ENABLE_CROP0 | PISP_FE_ENABLE_DOWNSCALE0 | \
	  PISP_FE_ENABLE_COMPRESS0 | PISP_FE_ENABLE_OUTPUT0) << (4 * (i)))

/*
 * Maps one region of struct pisp_fe_config to the dirty bit(s) that mark
 * it as needing to be written to the hardware.
 */
struct pisp_fe_config_param {
	u32 dirty_flags;
	u32 dirty_flags_extra;
	size_t offset;
	size_t size;
};

static const struct pisp_fe_config_param pisp_fe_config_map[] = {
	/* *_dirty_flag_extra types */
	{ 0, PISP_FE_DIRTY_GLOBAL,
	  offsetof(struct pisp_fe_config, global),
	  sizeof(struct pisp_fe_global_config) },
	{ 0, PISP_FE_DIRTY_FLOATING,
	  offsetof(struct pisp_fe_config, floating_stats),
	  sizeof(struct pisp_fe_floating_stats_config) },
	{ 0, PISP_FE_DIRTY_OUTPUT_AXI,
	  offsetof(struct pisp_fe_config, output_axi),
	  sizeof(struct pisp_fe_output_axi_config) },
	/* *_dirty_flag types */
	{ PISP_FE_ENABLE_INPUT, 0,
	  offsetof(struct pisp_fe_config, input),
	  sizeof(struct pisp_fe_input_config) },
	{ PISP_FE_ENABLE_DECOMPRESS, 0,
	  offsetof(struct pisp_fe_config, decompress),
	  sizeof(struct pisp_decompress_config) },
	{ PISP_FE_ENABLE_DECOMPAND, 0,
	  offsetof(struct pisp_fe_config, decompand),
	  sizeof(struct pisp_fe_decompand_config) },
	{ PISP_FE_ENABLE_BLA, 0,
	  offsetof(struct pisp_fe_config, bla),
	  sizeof(struct pisp_bla_config) },
	{ PISP_FE_ENABLE_DPC, 0,
	  offsetof(struct pisp_fe_config, dpc),
	  sizeof(struct pisp_fe_dpc_config) },
	{ PISP_FE_ENABLE_STATS_CROP, 0,
	  offsetof(struct pisp_fe_config, stats_crop),
	  sizeof(struct pisp_fe_crop_config) },
	{ PISP_FE_ENABLE_BLC, 0,
	  offsetof(struct pisp_fe_config, blc),
	  sizeof(struct pisp_bla_config) },
	{ PISP_FE_ENABLE_CDAF_STATS, 0,
	  offsetof(struct pisp_fe_config, cdaf_stats),
	  sizeof(struct pisp_fe_cdaf_stats_config) },
	{ PISP_FE_ENABLE_AWB_STATS, 0,
	  offsetof(struct pisp_fe_config, awb_stats),
	  sizeof(struct pisp_fe_awb_stats_config) },
	{ PISP_FE_ENABLE_RGBY, 0,
	  offsetof(struct pisp_fe_config, rgby),
	  sizeof(struct pisp_fe_rgby_config) },
	{ PISP_FE_ENABLE_LSC, 0,
	  offsetof(struct pisp_fe_config, lsc),
	  sizeof(struct pisp_fe_lsc_config) },
	{ PISP_FE_ENABLE_AGC_STATS, 0,
	  offsetof(struct pisp_fe_config, agc_stats),
	  sizeof(struct pisp_agc_statistics) },
	{ PISP_FE_ENABLE_CROP0, 0,
	  offsetof(struct pisp_fe_config, ch[0].crop),
	  sizeof(struct pisp_fe_crop_config) },
	{ PISP_FE_ENABLE_DOWNSCALE0, 0,
	  offsetof(struct pisp_fe_config, ch[0].downscale),
	  sizeof(struct pisp_fe_downscale_config) },
	{ PISP_FE_ENABLE_COMPRESS0, 0,
	  offsetof(struct pisp_fe_config, ch[0].compress),
	  sizeof(struct pisp_compress_config) },
	{ PISP_FE_ENABLE_OUTPUT0, 0,
	  offsetof(struct pisp_fe_config, ch[0].output),
	  sizeof(struct pisp_fe_output_config) },
	{ PISP_FE_ENABLE_CROP1, 0,
	  offsetof(struct pisp_fe_config, ch[1].crop),
	  sizeof(struct pisp_fe_crop_config) },
	{ PISP_FE_ENABLE_DOWNSCALE1, 0,
	  offsetof(struct pisp_fe_config, ch[1].downscale),
	  sizeof(struct pisp_fe_downscale_config) },
	{ PISP_FE_ENABLE_COMPRESS1, 0,
	  offsetof(struct pisp_fe_config, ch[1].compress),
	  sizeof(struct pisp_compress_config) },
	{ PISP_FE_ENABLE_OUTPUT1, 0,
	  offsetof(struct pisp_fe_config, ch[1].output),
	  sizeof(struct pisp_fe_output_config) },
};

#define pisp_fe_dbg(fe, fmt, arg...) dev_dbg((fe)->v4l2_dev->dev, fmt, ##arg)
#define pisp_fe_info(fe, fmt, arg...) dev_info((fe)->v4l2_dev->dev, fmt, ##arg)
#define pisp_fe_err(fe, fmt, arg...) dev_err((fe)->v4l2_dev->dev, fmt, ##arg)

static inline u32 pisp_fe_reg_read(struct pisp_fe_device *fe, u32 offset)
{
	return readl(fe->base + offset);
}

static inline void pisp_fe_reg_write(struct pisp_fe_device *fe, u32 offset,
				     u32 val)
{
	writel(val, fe->base + offset);
}

/* Relaxed variant for bulk config writes; no memory barrier per access. */
static inline void pisp_fe_reg_write_relaxed(struct pisp_fe_device *fe,
					     u32 offset, u32 val)
{
	writel_relaxed(val, fe->base + offset);
}

/* debugfs: latch and dump the FE status registers. */
static int pisp_fe_regs_show(struct seq_file *s, void *data)
{
	struct pisp_fe_device *fe = s->private;
	int ret;

	ret = pm_runtime_resume_and_get(fe->v4l2_dev->dev);
	if (ret)
		return ret;

	/* Latch the live status into readable registers before dumping. */
	pisp_fe_reg_write(fe, FE_CONTROL, FE_CONTROL_LATCH_REGS);

#define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", pisp_fe_reg_read(fe, reg))
	DUMP(FE_VERSION);
	DUMP(FE_CONTROL);
	DUMP(FE_STATUS);
	DUMP(FE_FRAME_STATUS);
	DUMP(FE_ERROR_STATUS);
	DUMP(FE_OUTPUT_STATUS);
	DUMP(FE_INT_EN);
	DUMP(FE_INT_STATUS);
#undef DUMP

	pm_runtime_put(fe->v4l2_dev->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(pisp_fe_regs);

/*
 * Copy a sub-range of @config into the FE's memory-mapped config area,
 * 32 bits at a time. The range is clamped to the end of the per-channel
 * configs so stats/trailing fields are never written from here.
 */
static void pisp_fe_config_write(struct pisp_fe_device *fe,
				 struct pisp_fe_config *config,
				 unsigned int start_offset, unsigned int size)
{
	const unsigned int max_offset =
		offsetof(struct pisp_fe_config, ch[PISP_FE_NUM_OUTPUTS]);
	unsigned int end_offset;
	u32 *cfg = (u32 *)config;

	start_offset = min(start_offset, max_offset);
	end_offset = min(start_offset + size, max_offset);

	cfg += start_offset >> 2;
	for (unsigned int i = start_offset; i < end_offset; i += 4, cfg++)
		pisp_fe_reg_write_relaxed(fe, PISP_FE_CONFIG_BASE_OFFSET + i,
					  *cfg);
}

void pisp_fe_isr(struct pisp_fe_device *fe, bool *sof, bool *eof)
{
212 + u32 status, int_status, out_status, frame_status, error_status; 213 + 214 + pisp_fe_reg_write(fe, FE_CONTROL, FE_CONTROL_LATCH_REGS); 215 + status = pisp_fe_reg_read(fe, FE_STATUS); 216 + out_status = pisp_fe_reg_read(fe, FE_OUTPUT_STATUS); 217 + frame_status = pisp_fe_reg_read(fe, FE_FRAME_STATUS); 218 + error_status = pisp_fe_reg_read(fe, FE_ERROR_STATUS); 219 + 220 + int_status = pisp_fe_reg_read(fe, FE_INT_STATUS); 221 + pisp_fe_reg_write(fe, FE_INT_STATUS, int_status); 222 + 223 + trace_fe_irq(status, out_status, frame_status, error_status, 224 + int_status); 225 + 226 + /* We do not report interrupts for the input/stream pad. */ 227 + for (unsigned int i = 0; i < FE_NUM_PADS - 1; i++) { 228 + sof[i] = !!(int_status & FE_INT_SOF); 229 + eof[i] = !!(int_status & FE_INT_EOF); 230 + } 231 + } 232 + 233 + static bool pisp_fe_validate_output(struct pisp_fe_config const *cfg, 234 + unsigned int c, struct v4l2_format const *f) 235 + { 236 + unsigned int wbytes; 237 + 238 + wbytes = cfg->ch[c].output.format.width; 239 + if (cfg->ch[c].output.format.format & PISP_IMAGE_FORMAT_BPS_MASK) 240 + wbytes *= 2; 241 + 242 + /* Check output image dimensions are nonzero and not too big */ 243 + if (cfg->ch[c].output.format.width < 2 || 244 + cfg->ch[c].output.format.height < 2 || 245 + cfg->ch[c].output.format.height > f->fmt.pix.height || 246 + cfg->ch[c].output.format.stride > f->fmt.pix.bytesperline || 247 + wbytes > f->fmt.pix.bytesperline) 248 + return false; 249 + 250 + /* Check for zero-sized crops, which could cause lockup */ 251 + if ((cfg->global.enables & PISP_FE_ENABLE_CROP(c)) && 252 + ((cfg->ch[c].crop.offset_x >= (cfg->input.format.width & ~1) || 253 + cfg->ch[c].crop.offset_y >= cfg->input.format.height || 254 + cfg->ch[c].crop.width < 2 || cfg->ch[c].crop.height < 2))) 255 + return false; 256 + 257 + if ((cfg->global.enables & PISP_FE_ENABLE_DOWNSCALE(c)) && 258 + (cfg->ch[c].downscale.output_width < 2 || 259 + cfg->ch[c].downscale.output_height < 2)) 260 
+ return false; 261 + 262 + return true; 263 + } 264 + 265 + static bool pisp_fe_validate_stats(struct pisp_fe_config const *cfg) 266 + { 267 + /* Check for zero-sized crop, which could cause lockup */ 268 + return (!(cfg->global.enables & PISP_FE_ENABLE_STATS_CROP) || 269 + (cfg->stats_crop.offset_x < (cfg->input.format.width & ~1) && 270 + cfg->stats_crop.offset_y < cfg->input.format.height && 271 + cfg->stats_crop.width >= 2 && cfg->stats_crop.height >= 2)); 272 + } 273 + 274 + int pisp_fe_validate_config(struct pisp_fe_device *fe, 275 + struct pisp_fe_config *cfg, 276 + struct v4l2_format const *f0, 277 + struct v4l2_format const *f1) 278 + { 279 + /* 280 + * Check the input is enabled, streaming and has nonzero size; 281 + * to avoid cases where the hardware might lock up or try to 282 + * read inputs from memory (which this driver doesn't support). 283 + */ 284 + if (!(cfg->global.enables & PISP_FE_ENABLE_INPUT) || 285 + cfg->input.streaming != 1 || cfg->input.format.width < 2 || 286 + cfg->input.format.height < 2) { 287 + pisp_fe_err(fe, "%s: Input config not valid", __func__); 288 + return -EINVAL; 289 + } 290 + 291 + for (unsigned int i = 0; i < PISP_FE_NUM_OUTPUTS; i++) { 292 + if (!(cfg->global.enables & PISP_FE_ENABLE_OUTPUT(i))) { 293 + if (cfg->global.enables & 294 + PISP_FE_ENABLE_OUTPUT_CLUSTER(i)) { 295 + pisp_fe_err(fe, "%s: Output %u not valid", 296 + __func__, i); 297 + return -EINVAL; 298 + } 299 + continue; 300 + } 301 + 302 + if (!pisp_fe_validate_output(cfg, i, i ? 
f1 : f0)) 303 + return -EINVAL; 304 + } 305 + 306 + if ((cfg->global.enables & PISP_FE_ENABLE_STATS_CLUSTER) && 307 + !pisp_fe_validate_stats(cfg)) { 308 + pisp_fe_err(fe, "%s: Stats config not valid", __func__); 309 + return -EINVAL; 310 + } 311 + 312 + return 0; 313 + } 314 + 315 + void pisp_fe_submit_job(struct pisp_fe_device *fe, struct vb2_buffer **vb2_bufs, 316 + struct pisp_fe_config *cfg) 317 + { 318 + u64 addr; 319 + u32 status; 320 + 321 + /* 322 + * Check output buffers exist and outputs are correctly configured. 323 + * If valid, set the buffer's DMA address; otherwise disable. 324 + */ 325 + for (unsigned int i = 0; i < PISP_FE_NUM_OUTPUTS; i++) { 326 + struct vb2_buffer *buf = vb2_bufs[FE_OUTPUT0_PAD + i]; 327 + 328 + if (!(cfg->global.enables & PISP_FE_ENABLE_OUTPUT(i))) 329 + continue; 330 + 331 + addr = vb2_dma_contig_plane_dma_addr(buf, 0); 332 + cfg->output_buffer[i].addr_lo = addr & 0xffffffff; 333 + cfg->output_buffer[i].addr_hi = addr >> 32; 334 + } 335 + 336 + if (vb2_bufs[FE_STATS_PAD]) { 337 + addr = vb2_dma_contig_plane_dma_addr(vb2_bufs[FE_STATS_PAD], 0); 338 + cfg->stats_buffer.addr_lo = addr & 0xffffffff; 339 + cfg->stats_buffer.addr_hi = addr >> 32; 340 + } 341 + 342 + /* Set up ILINES interrupts 3/4 of the way down each output */ 343 + cfg->ch[0].output.ilines = 344 + max(0x80u, (3u * cfg->ch[0].output.format.height) >> 2); 345 + cfg->ch[1].output.ilines = 346 + max(0x80u, (3u * cfg->ch[1].output.format.height) >> 2); 347 + 348 + /* 349 + * The hardware must have consumed the previous config by now. 350 + * This read of status also serves as a memory barrier before the 351 + * sequence of relaxed writes which follow. 352 + */ 353 + status = pisp_fe_reg_read(fe, FE_STATUS); 354 + if (WARN_ON(status & FE_STATUS_QUEUED)) 355 + return; 356 + 357 + /* 358 + * Unconditionally write buffers, global and input parameters. 359 + * Write cropping and output parameters whenever they are enabled. 
360 + * Selectively write other parameters that have been marked as 361 + * changed through the dirty flags. 362 + */ 363 + pisp_fe_config_write(fe, cfg, 0, 364 + offsetof(struct pisp_fe_config, decompress)); 365 + cfg->dirty_flags_extra &= ~PISP_FE_DIRTY_GLOBAL; 366 + cfg->dirty_flags &= ~PISP_FE_ENABLE_INPUT; 367 + cfg->dirty_flags |= (cfg->global.enables & 368 + (PISP_FE_ENABLE_STATS_CROP | 369 + PISP_FE_ENABLE_OUTPUT_CLUSTER(0) | 370 + PISP_FE_ENABLE_OUTPUT_CLUSTER(1))); 371 + for (unsigned int i = 0; i < ARRAY_SIZE(pisp_fe_config_map); i++) { 372 + const struct pisp_fe_config_param *p = &pisp_fe_config_map[i]; 373 + 374 + if (cfg->dirty_flags & p->dirty_flags || 375 + cfg->dirty_flags_extra & p->dirty_flags_extra) 376 + pisp_fe_config_write(fe, cfg, p->offset, p->size); 377 + } 378 + 379 + /* This final non-relaxed write serves as a memory barrier */ 380 + pisp_fe_reg_write(fe, FE_CONTROL, FE_CONTROL_QUEUE); 381 + } 382 + 383 + void pisp_fe_start(struct pisp_fe_device *fe) 384 + { 385 + pisp_fe_reg_write(fe, FE_CONTROL, FE_CONTROL_RESET); 386 + pisp_fe_reg_write(fe, FE_INT_STATUS, ~0); 387 + pisp_fe_reg_write(fe, FE_INT_EN, FE_INT_EOF | FE_INT_SOF | 388 + FE_INT_LINES0 | FE_INT_LINES1); 389 + fe->inframe_count = 0; 390 + } 391 + 392 + void pisp_fe_stop(struct pisp_fe_device *fe) 393 + { 394 + pisp_fe_reg_write(fe, FE_INT_EN, 0); 395 + pisp_fe_reg_write(fe, FE_CONTROL, FE_CONTROL_ABORT); 396 + usleep_range(1000, 2000); 397 + WARN_ON(pisp_fe_reg_read(fe, FE_STATUS)); 398 + pisp_fe_reg_write(fe, FE_INT_STATUS, ~0); 399 + } 400 + 401 + static int pisp_fe_init_state(struct v4l2_subdev *sd, 402 + struct v4l2_subdev_state *state) 403 + { 404 + struct v4l2_mbus_framefmt *fmt; 405 + 406 + fmt = v4l2_subdev_state_get_format(state, FE_STREAM_PAD); 407 + *fmt = cfe_default_format; 408 + fmt->code = MEDIA_BUS_FMT_SRGGB16_1X16; 409 + 410 + fmt = v4l2_subdev_state_get_format(state, FE_CONFIG_PAD); 411 + fmt->code = MEDIA_BUS_FMT_FIXED; 412 + fmt->width = sizeof(struct 
pisp_fe_config); 413 + fmt->height = 1; 414 + 415 + fmt = v4l2_subdev_state_get_format(state, FE_OUTPUT0_PAD); 416 + *fmt = cfe_default_format; 417 + fmt->code = MEDIA_BUS_FMT_SRGGB16_1X16; 418 + 419 + fmt = v4l2_subdev_state_get_format(state, FE_OUTPUT1_PAD); 420 + *fmt = cfe_default_format; 421 + fmt->code = MEDIA_BUS_FMT_SRGGB16_1X16; 422 + 423 + fmt = v4l2_subdev_state_get_format(state, FE_STATS_PAD); 424 + fmt->code = MEDIA_BUS_FMT_FIXED; 425 + fmt->width = sizeof(struct pisp_statistics); 426 + fmt->height = 1; 427 + 428 + return 0; 429 + } 430 + 431 + static int pisp_fe_pad_set_fmt(struct v4l2_subdev *sd, 432 + struct v4l2_subdev_state *state, 433 + struct v4l2_subdev_format *format) 434 + { 435 + struct v4l2_mbus_framefmt *fmt; 436 + const struct cfe_fmt *cfe_fmt; 437 + 438 + /* TODO: format propagation to source pads */ 439 + /* TODO: format validation */ 440 + 441 + switch (format->pad) { 442 + case FE_STREAM_PAD: 443 + cfe_fmt = find_format_by_code(format->format.code); 444 + if (!cfe_fmt || !(cfe_fmt->flags & CFE_FORMAT_FLAG_FE_OUT)) 445 + cfe_fmt = find_format_by_code(MEDIA_BUS_FMT_SRGGB16_1X16); 446 + 447 + format->format.code = cfe_fmt->code; 448 + format->format.field = V4L2_FIELD_NONE; 449 + 450 + fmt = v4l2_subdev_state_get_format(state, FE_STREAM_PAD); 451 + *fmt = format->format; 452 + 453 + fmt = v4l2_subdev_state_get_format(state, FE_OUTPUT0_PAD); 454 + *fmt = format->format; 455 + 456 + fmt = v4l2_subdev_state_get_format(state, FE_OUTPUT1_PAD); 457 + *fmt = format->format; 458 + 459 + return 0; 460 + 461 + case FE_OUTPUT0_PAD: 462 + case FE_OUTPUT1_PAD: { 463 + /* 464 + * TODO: we should allow scaling and cropping by allowing the 465 + * user to set the size here. 
466 + */ 467 + struct v4l2_mbus_framefmt *sink_fmt, *source_fmt; 468 + u32 sink_code; 469 + u32 code; 470 + 471 + cfe_fmt = find_format_by_code(format->format.code); 472 + if (!cfe_fmt || !(cfe_fmt->flags & CFE_FORMAT_FLAG_FE_OUT)) 473 + cfe_fmt = find_format_by_code(MEDIA_BUS_FMT_SRGGB16_1X16); 474 + 475 + format->format.code = cfe_fmt->code; 476 + 477 + sink_fmt = v4l2_subdev_state_get_format(state, FE_STREAM_PAD); 478 + if (!sink_fmt) 479 + return -EINVAL; 480 + 481 + source_fmt = v4l2_subdev_state_get_format(state, format->pad); 482 + if (!source_fmt) 483 + return -EINVAL; 484 + 485 + sink_code = sink_fmt->code; 486 + code = format->format.code; 487 + 488 + /* 489 + * If the source code from the user does not match the code in 490 + * the sink pad, check that the source code matches the 491 + * compressed version of the sink code. 492 + */ 493 + 494 + if (code != sink_code && 495 + code == cfe_find_compressed_code(sink_code)) 496 + source_fmt->code = code; 497 + 498 + return 0; 499 + } 500 + 501 + case FE_CONFIG_PAD: 502 + case FE_STATS_PAD: 503 + default: 504 + return v4l2_subdev_get_fmt(sd, state, format); 505 + } 506 + } 507 + 508 + static const struct v4l2_subdev_pad_ops pisp_fe_subdev_pad_ops = { 509 + .get_fmt = v4l2_subdev_get_fmt, 510 + .set_fmt = pisp_fe_pad_set_fmt, 511 + .link_validate = v4l2_subdev_link_validate_default, 512 + }; 513 + 514 + static int pisp_fe_link_validate(struct media_link *link) 515 + { 516 + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(link->sink->entity); 517 + struct pisp_fe_device *fe = container_of(sd, struct pisp_fe_device, sd); 518 + 519 + pisp_fe_dbg(fe, "%s: link \"%s\":%u -> \"%s\":%u\n", __func__, 520 + link->source->entity->name, link->source->index, 521 + link->sink->entity->name, link->sink->index); 522 + 523 + if (link->sink->index == FE_STREAM_PAD) 524 + return v4l2_subdev_link_validate(link); 525 + 526 + if (link->sink->index == FE_CONFIG_PAD) 527 + return 0; 528 + 529 + return -EINVAL; 530 + } 531 + 532 
static const struct media_entity_operations pisp_fe_entity_ops = {
	.link_validate = pisp_fe_link_validate,
};

static const struct v4l2_subdev_ops pisp_fe_subdev_ops = {
	.pad = &pisp_fe_subdev_pad_ops,
};

static const struct v4l2_subdev_internal_ops pisp_fe_internal_ops = {
	.init_state = pisp_fe_init_state,
};

/*
 * Initialize the FE: create the debugfs register dump, read the hardware
 * revision, set up the media pads and register the "pisp-fe" subdev on
 * fe->v4l2_dev. fe->base and fe->v4l2_dev must be set by the caller.
 * Returns 0 on success or a negative error code; on failure everything
 * set up here is unwound.
 */
int pisp_fe_init(struct pisp_fe_device *fe, struct dentry *debugfs)
{
	int ret;

	debugfs_create_file("fe_regs", 0440, debugfs, fe, &pisp_fe_regs_fops);

	/* Major version in bits 31:24, minor in bits 23:20. */
	fe->hw_revision = pisp_fe_reg_read(fe, FE_VERSION);
	pisp_fe_info(fe, "PiSP FE HW v%u.%u\n",
		     (fe->hw_revision >> 24) & 0xff,
		     (fe->hw_revision >> 20) & 0x0f);

	fe->pad[FE_STREAM_PAD].flags =
		MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	fe->pad[FE_CONFIG_PAD].flags = MEDIA_PAD_FL_SINK;
	fe->pad[FE_OUTPUT0_PAD].flags = MEDIA_PAD_FL_SOURCE;
	fe->pad[FE_OUTPUT1_PAD].flags = MEDIA_PAD_FL_SOURCE;
	fe->pad[FE_STATS_PAD].flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&fe->sd.entity, ARRAY_SIZE(fe->pad),
				     fe->pad);
	if (ret)
		return ret;

	/* Initialize subdev */
	v4l2_subdev_init(&fe->sd, &pisp_fe_subdev_ops);
	fe->sd.internal_ops = &pisp_fe_internal_ops;
	fe->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
	fe->sd.entity.ops = &pisp_fe_entity_ops;
	fe->sd.entity.name = "pisp-fe";
	fe->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
	fe->sd.owner = THIS_MODULE;
	snprintf(fe->sd.name, sizeof(fe->sd.name), "pisp-fe");

	ret = v4l2_subdev_init_finalize(&fe->sd);
	if (ret)
		goto err_entity_cleanup;

	ret = v4l2_device_register_subdev(fe->v4l2_dev, &fe->sd);
	if (ret) {
		pisp_fe_err(fe, "Failed register pisp fe subdev (%d)\n", ret);
		goto err_subdev_cleanup;
	}

	/* Must be in IDLE state (STATUS == 0) here. */
	WARN_ON(pisp_fe_reg_read(fe, FE_STATUS));

	return 0;

err_subdev_cleanup:
	v4l2_subdev_cleanup(&fe->sd);
err_entity_cleanup:
	media_entity_cleanup(&fe->sd.entity);

	return ret;
}

/* Undo pisp_fe_init(): unregister the subdev and release its resources. */
void pisp_fe_uninit(struct pisp_fe_device *fe)
{
	v4l2_device_unregister_subdev(&fe->sd);
	v4l2_subdev_cleanup(&fe->sd);
	media_entity_cleanup(&fe->sd.entity);
}
+53
drivers/media/platform/raspberrypi/rp1-cfe/pisp-fe.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * PiSP Front End Driver
 *
 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
 */
#ifndef _PISP_FE_H_
#define _PISP_FE_H_

#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/videodev2.h>

#include <media/media-device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

#include <linux/media/raspberrypi/pisp_fe_config.h>

/*
 * Media pads of the FE subdev. STREAM and CONFIG are sinks (image stream
 * and pisp_fe_config buffer respectively); OUTPUT0/1 and STATS are sources
 * (processed images and pisp_statistics buffer). See pisp_fe_init() for the
 * pad flags.
 */
enum pisp_fe_pads {
	FE_STREAM_PAD,
	FE_CONFIG_PAD,
	FE_OUTPUT0_PAD,
	FE_OUTPUT1_PAD,
	FE_STATS_PAD,
	FE_NUM_PADS
};

struct pisp_fe_device {
	/* Parent V4l2 device */
	struct v4l2_device *v4l2_dev;
	/* MMIO base of the FE register block */
	void __iomem *base;
	/* Raw FE_VERSION register value, read at init time */
	u32 hw_revision;

	/* Input frame counter; zeroed in pisp_fe_start() */
	u16 inframe_count;
	struct media_pad pad[FE_NUM_PADS];
	struct v4l2_subdev sd;
};

void pisp_fe_isr(struct pisp_fe_device *fe, bool *sof, bool *eof);
int pisp_fe_validate_config(struct pisp_fe_device *fe,
			    struct pisp_fe_config *cfg,
			    struct v4l2_format const *f0,
			    struct v4l2_format const *f1);
void pisp_fe_submit_job(struct pisp_fe_device *fe, struct vb2_buffer **vb2_bufs,
			struct pisp_fe_config *cfg);
void pisp_fe_start(struct pisp_fe_device *fe);
void pisp_fe_stop(struct pisp_fe_device *fe);
int pisp_fe_init(struct pisp_fe_device *fe, struct dentry *debugfs);
void pisp_fe_uninit(struct pisp_fe_device *fe);

#endif
+273
include/uapi/linux/media/raspberrypi/pisp_fe_config.h
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * RP1 PiSP Front End Driver Configuration structures
 *
 * Copyright (C) 2021 - Raspberry Pi Ltd.
 *
 * NOTE: this is a UAPI header; all structures are packed and their layout
 * mirrors the hardware's config registers, so it must not be changed in
 * ABI-incompatible ways.
 */
#ifndef _UAPI_PISP_FE_CONFIG_
#define _UAPI_PISP_FE_CONFIG_

#include <linux/types.h>

#include "pisp_common.h"
#include "pisp_fe_statistics.h"

/* Number of output branches (channels) of the Front End */
#define PISP_FE_NUM_OUTPUTS 2

/*
 * Block enable bits for pisp_fe_global_config::enables; the same values
 * are used as dirty flags in pisp_fe_config::dirty_flags. The per-branch
 * bits of output 1 sit 4 positions above those of output 0.
 */
enum pisp_fe_enable {
	PISP_FE_ENABLE_INPUT = 0x000001,
	PISP_FE_ENABLE_DECOMPRESS = 0x000002,
	PISP_FE_ENABLE_DECOMPAND = 0x000004,
	PISP_FE_ENABLE_BLA = 0x000008,
	PISP_FE_ENABLE_DPC = 0x000010,
	PISP_FE_ENABLE_STATS_CROP = 0x000020,
	PISP_FE_ENABLE_DECIMATE = 0x000040,
	PISP_FE_ENABLE_BLC = 0x000080,
	PISP_FE_ENABLE_CDAF_STATS = 0x000100,
	PISP_FE_ENABLE_AWB_STATS = 0x000200,
	PISP_FE_ENABLE_RGBY = 0x000400,
	PISP_FE_ENABLE_LSC = 0x000800,
	PISP_FE_ENABLE_AGC_STATS = 0x001000,
	PISP_FE_ENABLE_CROP0 = 0x010000,
	PISP_FE_ENABLE_DOWNSCALE0 = 0x020000,
	PISP_FE_ENABLE_COMPRESS0 = 0x040000,
	PISP_FE_ENABLE_OUTPUT0 = 0x080000,
	PISP_FE_ENABLE_CROP1 = 0x100000,
	PISP_FE_ENABLE_DOWNSCALE1 = 0x200000,
	PISP_FE_ENABLE_COMPRESS1 = 0x400000,
	PISP_FE_ENABLE_OUTPUT1 = 0x800000
};

/* Per-branch accessors for the bits above; i is 0 or 1 */
#define PISP_FE_ENABLE_CROP(i) (PISP_FE_ENABLE_CROP0 << (4 * (i)))
#define PISP_FE_ENABLE_DOWNSCALE(i) (PISP_FE_ENABLE_DOWNSCALE0 << (4 * (i)))
#define PISP_FE_ENABLE_COMPRESS(i) (PISP_FE_ENABLE_COMPRESS0 << (4 * (i)))
#define PISP_FE_ENABLE_OUTPUT(i) (PISP_FE_ENABLE_OUTPUT0 << (4 * (i)))

/*
 * We use the enable flags to show when blocks are "dirty", but we need some
 * extra ones too.
 */
enum pisp_fe_dirty {
	PISP_FE_DIRTY_GLOBAL = 0x0001,
	PISP_FE_DIRTY_FLOATING = 0x0002,
	PISP_FE_DIRTY_OUTPUT_AXI = 0x0004
};

struct pisp_fe_global_config {
	/* Bitwise OR of enum pisp_fe_enable values */
	__u32 enables;
	__u8 bayer_order;
	__u8 pad[3];
} __attribute__((packed));

struct pisp_fe_input_axi_config {
	/* burst length minus one, in the range 0..15; OR'd with flags */
	__u8 maxlen_flags;
	/* { prot[2:0], cache[3:0] } fields */
	__u8 cache_prot;
	/* QoS (only 4 LS bits are used) */
	__u16 qos;
} __attribute__((packed));

struct pisp_fe_output_axi_config {
	/* burst length minus one, in the range 0..15; OR'd with flags */
	__u8 maxlen_flags;
	/* { prot[2:0], cache[3:0] } fields */
	__u8 cache_prot;
	/* QoS (4 bitfields of 4 bits each for different panic levels) */
	__u16 qos;
	/* For Panic mode: Output FIFO panic threshold */
	__u16 thresh;
	/* For Panic mode: Output FIFO statistics throttle threshold */
	__u16 throttle;
} __attribute__((packed));

struct pisp_fe_input_config {
	/* Nonzero (must be 1) when the input is a streamed CSI-2 source */
	__u8 streaming;
	__u8 pad[3];
	struct pisp_image_format_config format;
	struct pisp_fe_input_axi_config axi;
	/* Extra cycles delay before issuing each burst request */
	__u8 holdoff;
	__u8 pad2[3];
} __attribute__((packed));

struct pisp_fe_output_config {
	struct pisp_image_format_config format;
	/* Line count at which the LINES interrupt fires */
	__u16 ilines;
	__u8 pad[2];
} __attribute__((packed));

struct pisp_fe_input_buffer_config {
	__u32 addr_lo;
	__u32 addr_hi;
	__u16 frame_id;
	__u16 pad;
} __attribute__((packed));

#define PISP_FE_DECOMPAND_LUT_SIZE 65

struct pisp_fe_decompand_config {
	__u16 lut[PISP_FE_DECOMPAND_LUT_SIZE];
	__u16 pad;
} __attribute__((packed));

struct pisp_fe_dpc_config {
	__u8 coeff_level;
	__u8 coeff_range;
	__u8 coeff_range2;
#define PISP_FE_DPC_FLAG_FOLDBACK 1
#define PISP_FE_DPC_FLAG_VFLAG 2
	__u8 flags;
} __attribute__((packed));

#define PISP_FE_LSC_LUT_SIZE 16

struct pisp_fe_lsc_config {
	__u8 shift;
	__u8 pad0;
	__u16 scale;
	__u16 centre_x;
	__u16 centre_y;
	__u16 lut[PISP_FE_LSC_LUT_SIZE];
} __attribute__((packed));

struct pisp_fe_rgby_config {
	__u16 gain_r;
	__u16 gain_g;
	__u16 gain_b;
	__u8 maxflag;
	__u8 pad;
} __attribute__((packed));

struct pisp_fe_agc_stats_config {
	__u16 offset_x;
	__u16 offset_y;
	__u16 size_x;
	__u16 size_y;
	/* each weight only 4 bits */
	__u8 weights[PISP_AGC_STATS_NUM_ZONES / 2];
	__u16 row_offset_x;
	__u16 row_offset_y;
	__u16 row_size_x;
	__u16 row_size_y;
	__u8 row_shift;
	__u8 float_shift;
	__u8 pad1[2];
} __attribute__((packed));

struct pisp_fe_awb_stats_config {
	__u16 offset_x;
	__u16 offset_y;
	__u16 size_x;
	__u16 size_y;
	__u8 shift;
	__u8 pad[3];
	/* Per-channel low/high thresholds for pixels to be counted */
	__u16 r_lo;
	__u16 r_hi;
	__u16 g_lo;
	__u16 g_hi;
	__u16 b_lo;
	__u16 b_hi;
} __attribute__((packed));

struct pisp_fe_floating_stats_region {
	__u16 offset_x;
	__u16 offset_y;
	__u16 size_x;
	__u16 size_y;
} __attribute__((packed));

struct pisp_fe_floating_stats_config {
	struct pisp_fe_floating_stats_region
		regions[PISP_FLOATING_STATS_NUM_ZONES];
} __attribute__((packed));

#define PISP_FE_CDAF_NUM_WEIGHTS 8

struct pisp_fe_cdaf_stats_config {
	__u16 noise_constant;
	__u16 noise_slope;
	__u16 offset_x;
	__u16 offset_y;
	__u16 size_x;
	__u16 size_y;
	__u16 skip_x;
	__u16 skip_y;
	__u32 mode;
} __attribute__((packed));

struct pisp_fe_stats_buffer_config {
	__u32 addr_lo;
	__u32 addr_hi;
} __attribute__((packed));

struct pisp_fe_crop_config {
	__u16 offset_x;
	__u16 offset_y;
	__u16 width;
	__u16 height;
} __attribute__((packed));

enum pisp_fe_downscale_flags {
	/* downscale the four Bayer components independently... */
	DOWNSCALE_BAYER = 1,
	/* ...without trying to preserve their spatial relationship */
	DOWNSCALE_BIN = 2,
};

struct pisp_fe_downscale_config {
	__u8 xin;
	__u8 xout;
	__u8 yin;
	__u8 yout;
	__u8 flags; /* enum pisp_fe_downscale_flags */
	__u8 pad[3];
	__u16 output_width;
	__u16 output_height;
} __attribute__((packed));

struct pisp_fe_output_buffer_config {
	__u32 addr_lo;
	__u32 addr_hi;
} __attribute__((packed));

/* Each of the two output channels/branches: */
struct pisp_fe_output_branch_config {
	struct pisp_fe_crop_config crop;
	struct pisp_fe_downscale_config downscale;
	struct pisp_compress_config compress;
	struct pisp_fe_output_config output;
	__u32 pad;
} __attribute__((packed));

/* And finally one to rule them all: */
struct pisp_fe_config {
	/* I/O configuration: */
	struct pisp_fe_stats_buffer_config stats_buffer;
	struct pisp_fe_output_buffer_config output_buffer[PISP_FE_NUM_OUTPUTS];
	struct pisp_fe_input_buffer_config input_buffer;
	/* processing configuration: */
	struct pisp_fe_global_config global;
	struct pisp_fe_input_config input;
	struct pisp_decompress_config decompress;
	struct pisp_fe_decompand_config decompand;
	struct pisp_bla_config bla;
	struct pisp_fe_dpc_config dpc;
	struct pisp_fe_crop_config stats_crop;
	__u32 spare1; /* placeholder for future decimate configuration */
	struct pisp_bla_config blc;
	struct pisp_fe_rgby_config rgby;
	struct pisp_fe_lsc_config lsc;
	struct pisp_fe_agc_stats_config agc_stats;
	struct pisp_fe_awb_stats_config awb_stats;
	struct pisp_fe_cdaf_stats_config cdaf_stats;
	struct pisp_fe_floating_stats_config floating_stats;
	struct pisp_fe_output_axi_config output_axi;
	struct pisp_fe_output_branch_config ch[PISP_FE_NUM_OUTPUTS];
	/* non-register fields: */
	__u32 dirty_flags; /* these use pisp_fe_enable */
	__u32 dirty_flags_extra; /* these use pisp_fe_dirty */
} __attribute__((packed));

#endif /* _UAPI_PISP_FE_CONFIG_ */
+64
include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * RP1 PiSP Front End statistics definitions
 *
 * Copyright (C) 2021 - Raspberry Pi Ltd.
 *
 * NOTE: this is a UAPI header; the packed layouts below match what the
 * hardware writes into the statistics buffer and must not change.
 */
#ifndef _UAPI_PISP_FE_STATISTICS_H_
#define _UAPI_PISP_FE_STATISTICS_H_

#include <linux/types.h>

#define PISP_FLOATING_STATS_NUM_ZONES 4
#define PISP_AGC_STATS_NUM_BINS 1024
#define PISP_AGC_STATS_SIZE 16
#define PISP_AGC_STATS_NUM_ZONES (PISP_AGC_STATS_SIZE * PISP_AGC_STATS_SIZE)
#define PISP_AGC_STATS_NUM_ROW_SUMS 512

/* Luma sum and contributing-pixel count for one AGC zone */
struct pisp_agc_statistics_zone {
	__u64 Y_sum;
	__u32 counted;
	__u32 pad;
} __attribute__((packed));

struct pisp_agc_statistics {
	__u32 row_sums[PISP_AGC_STATS_NUM_ROW_SUMS];
	/*
	 * 32-bits per bin means an image (just less than) 16384x16384 pixels
	 * in size can weight every pixel from 0 to 15.
	 */
	__u32 histogram[PISP_AGC_STATS_NUM_BINS];
	struct pisp_agc_statistics_zone floating[PISP_FLOATING_STATS_NUM_ZONES];
} __attribute__((packed));

#define PISP_AWB_STATS_SIZE 32
#define PISP_AWB_STATS_NUM_ZONES (PISP_AWB_STATS_SIZE * PISP_AWB_STATS_SIZE)

/* Per-channel sums and contributing-pixel count for one AWB zone */
struct pisp_awb_statistics_zone {
	__u32 R_sum;
	__u32 G_sum;
	__u32 B_sum;
	__u32 counted;
} __attribute__((packed));

/* AWB statistics: a 32x32 grid of zones plus the floating regions */
struct pisp_awb_statistics {
	struct pisp_awb_statistics_zone zones[PISP_AWB_STATS_NUM_ZONES];
	struct pisp_awb_statistics_zone floating[PISP_FLOATING_STATS_NUM_ZONES];
} __attribute__((packed));

#define PISP_CDAF_STATS_SIZE 8
#define PISP_CDAF_STATS_NUM_FOMS (PISP_CDAF_STATS_SIZE * PISP_CDAF_STATS_SIZE)

/* CDAF focus figures of merit: an 8x8 grid plus the floating regions */
struct pisp_cdaf_statistics {
	__u64 foms[PISP_CDAF_STATS_NUM_FOMS];
	__u64 floating[PISP_FLOATING_STATS_NUM_ZONES];
} __attribute__((packed));

/* The complete statistics block the FE writes out per frame */
struct pisp_statistics {
	struct pisp_awb_statistics awb;
	struct pisp_agc_statistics agc;
	struct pisp_cdaf_statistics cdaf;
} __attribute__((packed));

#endif /* _UAPI_PISP_FE_STATISTICS_H_ */