Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'media/v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media

Pull new experimental media request API from Mauro Carvalho Chehab:
"A new media request API

This API is needed to support device drivers that can dynamically
change their parameters for each new frame. The latest versions of
Google camera and codec HAL depend on such a feature.

At this stage, it supports only stateless codecs.

It has been discussed for a long time (at least over the last 3-4
years), and we finally reached something that seems to work.

This series contains both the API and core changes required to support
it and a new m2m decoder driver (cedrus).

As the current API is still experimental, the only real driver using
it (cedrus) was added at staging[1]. We intend to keep it there for a
while, in order to test the API. Only when we're sure that this API
works for other cases (like encoders), we'll move this driver out of
staging and set the API in stone.

[1] We added support for the vivid virtual driver (used only for
testing) to it too, as it makes it easier to test the API for the ones
that don't have the cedrus hardware"

* tag 'media/v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media: (53 commits)
media: dt-bindings: Document the Rockchip VPU bindings
media: platform: Add Cedrus VPU decoder driver
media: dt-bindings: media: Document bindings for the Cedrus VPU driver
media: v4l: Add definition for the Sunxi tiled NV12 format
media: v4l: Add definitions for MPEG-2 slice format and metadata
media: videobuf2-core: Rework and rename helper for request buffer count
media: v4l2-ctrls.c: initialize an error return code with zero
media: v4l2-compat-ioctl32.c: add missing documentation for a field
media: media-request: update documentation
media: media-request: EPERM -> EACCES/EBUSY
media: v4l2-ctrls: improve media_request_(un)lock_for_update
media: v4l2-ctrls: use media_request_(un)lock_for_access
media: media-request: add media_request_(un)lock_for_access
media: vb2: set reqbufs/create_bufs capabilities
media: videodev2.h: add new capabilities for buffer types
media: buffer.rst: only set V4L2_BUF_FLAG_REQUEST_FD for QBUF
media: v4l2-ctrls: return -EACCES if request wasn't completed
media: media-request: return -EINVAL for invalid request_fds
media: vivid: add request support
media: vivid: add mc
...

+6165 -384
+54
Documentation/devicetree/bindings/media/cedrus.txt
··· 1 + Device-tree bindings for the VPU found in Allwinner SoCs, referred to as the 2 + Video Engine (VE) in Allwinner literature. 3 + 4 + The VPU can only access the first 256 MiB of DRAM, that are DMA-mapped starting 5 + from the DRAM base. This requires specific memory allocation and handling. 6 + 7 + Required properties: 8 + - compatible : must be one of the following compatibles: 9 + - "allwinner,sun4i-a10-video-engine" 10 + - "allwinner,sun5i-a13-video-engine" 11 + - "allwinner,sun7i-a20-video-engine" 12 + - "allwinner,sun8i-a33-video-engine" 13 + - "allwinner,sun8i-h3-video-engine" 14 + - reg : register base and length of VE; 15 + - clocks : list of clock specifiers, corresponding to entries in 16 + the clock-names property; 17 + - clock-names : should contain "ahb", "mod" and "ram" entries; 18 + - resets : phandle for reset; 19 + - interrupts : VE interrupt number; 20 + - allwinner,sram : SRAM region to use with the VE. 21 + 22 + Optional properties: 23 + - memory-region : CMA pool to use for buffers allocation instead of the 24 + default CMA pool. 25 + 26 + Example: 27 + 28 + reserved-memory { 29 + #address-cells = <1>; 30 + #size-cells = <1>; 31 + ranges; 32 + 33 + /* Address must be kept in the lower 256 MiBs of DRAM for VE. */ 34 + cma_pool: cma@4a000000 { 35 + compatible = "shared-dma-pool"; 36 + size = <0x6000000>; 37 + alloc-ranges = <0x4a000000 0x6000000>; 38 + reusable; 39 + linux,cma-default; 40 + }; 41 + }; 42 + 43 + video-codec@1c0e000 { 44 + compatible = "allwinner,sun7i-a20-video-engine"; 45 + reg = <0x01c0e000 0x1000>; 46 + 47 + clocks = <&ccu CLK_AHB_VE>, <&ccu CLK_VE>, 48 + <&ccu CLK_DRAM_VE>; 49 + clock-names = "ahb", "mod", "ram"; 50 + 51 + resets = <&ccu RST_VE>; 52 + interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; 53 + allwinner,sram = <&ve_sram 1>; 54 + };
+29
Documentation/devicetree/bindings/media/rockchip-vpu.txt
··· 1 + device-tree bindings for rockchip VPU codec 2 + 3 + Rockchip (Video Processing Unit) present in various Rockchip platforms, 4 + such as RK3288 and RK3399. 5 + 6 + Required properties: 7 + - compatible: value should be one of the following 8 + "rockchip,rk3288-vpu"; 9 + "rockchip,rk3399-vpu"; 10 + - interrupts: encoding and decoding interrupt specifiers 11 + - interrupt-names: should be "vepu" and "vdpu" 12 + - clocks: phandle to VPU aclk, hclk clocks 13 + - clock-names: should be "aclk" and "hclk" 14 + - power-domains: phandle to power domain node 15 + - iommus: phandle to a iommu node 16 + 17 + Example: 18 + SoC-specific DT entry: 19 + vpu: video-codec@ff9a0000 { 20 + compatible = "rockchip,rk3288-vpu"; 21 + reg = <0x0 0xff9a0000 0x0 0x800>; 22 + interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, 23 + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; 24 + interrupt-names = "vepu", "vdpu"; 25 + clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>; 26 + clock-names = "aclk", "hclk"; 27 + power-domains = <&power RK3288_PD_VIDEO>; 28 + iommus = <&vpu_mmu>; 29 + };
+2
Documentation/media/kapi/mc-core.rst
··· 262 262 .. kernel-doc:: include/media/media-devnode.h 263 263 264 264 .. kernel-doc:: include/media/media-entity.h 265 + 266 + .. kernel-doc:: include/media/media-request.h
+1
Documentation/media/uapi/mediactl/media-controller.rst
··· 21 21 media-controller-intro 22 22 media-controller-model 23 23 media-types 24 + request-api 24 25 media-funcs 25 26 media-header 26 27
+6
Documentation/media/uapi/mediactl/media-funcs.rst
··· 16 16 media-ioc-enum-entities 17 17 media-ioc-enum-links 18 18 media-ioc-setup-link 19 + media-ioc-request-alloc 20 + request-func-close 21 + request-func-ioctl 22 + request-func-poll 23 + media-request-ioc-queue 24 + media-request-ioc-reinit
+66
Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 2 + 3 + .. _media_ioc_request_alloc: 4 + 5 + ***************************** 6 + ioctl MEDIA_IOC_REQUEST_ALLOC 7 + ***************************** 8 + 9 + Name 10 + ==== 11 + 12 + MEDIA_IOC_REQUEST_ALLOC - Allocate a request 13 + 14 + 15 + Synopsis 16 + ======== 17 + 18 + .. c:function:: int ioctl( int fd, MEDIA_IOC_REQUEST_ALLOC, int *argp ) 19 + :name: MEDIA_IOC_REQUEST_ALLOC 20 + 21 + 22 + Arguments 23 + ========= 24 + 25 + ``fd`` 26 + File descriptor returned by :ref:`open() <media-func-open>`. 27 + 28 + ``argp`` 29 + Pointer to an integer. 30 + 31 + 32 + Description 33 + =========== 34 + 35 + If the media device supports :ref:`requests <media-request-api>`, then 36 + this ioctl can be used to allocate a request. If it is not supported, then 37 + ``errno`` is set to ``ENOTTY``. A request is accessed through a file descriptor 38 + that is returned in ``*argp``. 39 + 40 + If the request was successfully allocated, then the request file descriptor 41 + can be passed to the :ref:`VIDIOC_QBUF <VIDIOC_QBUF>`, 42 + :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`, 43 + :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` and 44 + :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctls. 45 + 46 + In addition, the request can be queued by calling 47 + :ref:`MEDIA_REQUEST_IOC_QUEUE` and re-initialized by calling 48 + :ref:`MEDIA_REQUEST_IOC_REINIT`. 49 + 50 + Finally, the file descriptor can be :ref:`polled <request-func-poll>` to wait 51 + for the request to complete. 52 + 53 + The request will remain allocated until all the file descriptors associated 54 + with it are closed by :ref:`close() <request-func-close>` and the driver no 55 + longer uses the request internally. See also 56 + :ref:`here <media-request-life-time>` for more information. 57 + 58 + Return Value 59 + ============ 60 + 61 + On success 0 is returned, on error -1 and the ``errno`` variable is set 62 + appropriately. 
The generic error codes are described at the 63 + :ref:`Generic Error Codes <gen-errors>` chapter. 64 + 65 + ENOTTY 66 + The driver has no support for requests.
+78
Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 2 + 3 + .. _media_request_ioc_queue: 4 + 5 + ***************************** 6 + ioctl MEDIA_REQUEST_IOC_QUEUE 7 + ***************************** 8 + 9 + Name 10 + ==== 11 + 12 + MEDIA_REQUEST_IOC_QUEUE - Queue a request 13 + 14 + 15 + Synopsis 16 + ======== 17 + 18 + .. c:function:: int ioctl( int request_fd, MEDIA_REQUEST_IOC_QUEUE ) 19 + :name: MEDIA_REQUEST_IOC_QUEUE 20 + 21 + 22 + Arguments 23 + ========= 24 + 25 + ``request_fd`` 26 + File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`. 27 + 28 + 29 + Description 30 + =========== 31 + 32 + If the media device supports :ref:`requests <media-request-api>`, then 33 + this request ioctl can be used to queue a previously allocated request. 34 + 35 + If the request was successfully queued, then the file descriptor can be 36 + :ref:`polled <request-func-poll>` to wait for the request to complete. 37 + 38 + If the request was already queued before, then ``EBUSY`` is returned. 39 + Other errors can be returned if the contents of the request contained 40 + invalid or inconsistent data, see the next section for a list of 41 + common error codes. On error both the request and driver state are unchanged. 42 + 43 + Once a request is queued, then the driver is required to gracefully handle 44 + errors that occur when the request is applied to the hardware. The 45 + exception is the ``EIO`` error which signals a fatal error that requires 46 + the application to stop streaming to reset the hardware state. 47 + 48 + It is not allowed to mix queuing requests with queuing buffers directly 49 + (without a request). ``EBUSY`` will be returned if the first buffer was 50 + queued directly and you next try to queue a request, or vice versa. 51 + 52 + A request must contain at least one buffer, otherwise this ioctl will 53 + return an ``ENOENT`` error. 
54 + 55 + Return Value 56 + ============ 57 + 58 + On success 0 is returned, on error -1 and the ``errno`` variable is set 59 + appropriately. The generic error codes are described at the 60 + :ref:`Generic Error Codes <gen-errors>` chapter. 61 + 62 + EBUSY 63 + The request was already queued or the application queued the first 64 + buffer directly, but later attempted to use a request. It is not permitted 65 + to mix the two APIs. 66 + ENOENT 67 + The request did not contain any buffers. All requests are required 68 + to have at least one buffer. This can also be returned if some required 69 + configuration is missing in the request. 70 + ENOMEM 71 + Out of memory when allocating internal data structures for this 72 + request. 73 + EINVAL 74 + The request has invalid data. 75 + EIO 76 + The hardware is in a bad state. To recover, the application needs to 77 + stop streaming to reset the hardware state and then try to restart 78 + streaming.
+51
Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 2 + 3 + .. _media_request_ioc_reinit: 4 + 5 + ****************************** 6 + ioctl MEDIA_REQUEST_IOC_REINIT 7 + ****************************** 8 + 9 + Name 10 + ==== 11 + 12 + MEDIA_REQUEST_IOC_REINIT - Re-initialize a request 13 + 14 + 15 + Synopsis 16 + ======== 17 + 18 + .. c:function:: int ioctl( int request_fd, MEDIA_REQUEST_IOC_REINIT ) 19 + :name: MEDIA_REQUEST_IOC_REINIT 20 + 21 + 22 + Arguments 23 + ========= 24 + 25 + ``request_fd`` 26 + File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`. 27 + 28 + Description 29 + =========== 30 + 31 + If the media device supports :ref:`requests <media-request-api>`, then 32 + this request ioctl can be used to re-initialize a previously allocated 33 + request. 34 + 35 + Re-initializing a request will clear any existing data from the request. 36 + This avoids having to :ref:`close() <request-func-close>` a completed 37 + request and allocate a new request. Instead the completed request can just 38 + be re-initialized and it is ready to be used again. 39 + 40 + A request can only be re-initialized if it either has not been queued 41 + yet, or if it was queued and completed. Otherwise it will set ``errno`` 42 + to ``EBUSY``. No other error codes can be returned. 43 + 44 + Return Value 45 + ============ 46 + 47 + On success 0 is returned, on error -1 and the ``errno`` variable is set 48 + appropriately. 49 + 50 + EBUSY 51 + The request is queued but not yet completed.
+252
Documentation/media/uapi/mediactl/request-api.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 2 + 3 + .. _media-request-api: 4 + 5 + Request API 6 + =========== 7 + 8 + The Request API has been designed to allow V4L2 to deal with requirements of 9 + modern devices (stateless codecs, complex camera pipelines, ...) and APIs 10 + (Android Codec v2). One such requirement is the ability for devices belonging to 11 + the same pipeline to reconfigure and collaborate closely on a per-frame basis. 12 + Another is support of stateless codecs, which require controls to be applied 13 + to specific frames (aka 'per-frame controls') in order to be used efficiently. 14 + 15 + While the initial use-case was V4L2, it can be extended to other subsystems 16 + as well, as long as they use the media controller. 17 + 18 + Supporting these features without the Request API is not always possible and if 19 + it is, it is terribly inefficient: user-space would have to flush all activity 20 + on the media pipeline, reconfigure it for the next frame, queue the buffers to 21 + be processed with that configuration, and wait until they are all available for 22 + dequeuing before considering the next frame. This defeats the purpose of having 23 + buffer queues since in practice only one buffer would be queued at a time. 24 + 25 + The Request API allows a specific configuration of the pipeline (media 26 + controller topology + configuration for each media entity) to be associated with 27 + specific buffers. This allows user-space to schedule several tasks ("requests") 28 + with different configurations in advance, knowing that the configuration will be 29 + applied when needed to get the expected result. Configuration values at the time 30 + of request completion are also available for reading. 31 + 32 + Usage 33 + ===== 34 + 35 + The Request API extends the Media Controller API and cooperates with 36 + subsystem-specific APIs to support request usage. 
At the Media Controller 37 + level, requests are allocated from the supporting Media Controller device 38 + node. Their life cycle is then managed through the request file descriptors in 39 + an opaque way. Configuration data, buffer handles and processing results 40 + stored in requests are accessed through subsystem-specific APIs extended for 41 + request support, such as V4L2 APIs that take an explicit ``request_fd`` 42 + parameter. 43 + 44 + Request Allocation 45 + ------------------ 46 + 47 + User-space allocates requests using :ref:`MEDIA_IOC_REQUEST_ALLOC` 48 + for the media device node. This returns a file descriptor representing the 49 + request. Typically, several such requests will be allocated. 50 + 51 + Request Preparation 52 + ------------------- 53 + 54 + Standard V4L2 ioctls can then receive a request file descriptor to express the 55 + fact that the ioctl is part of said request, and is not to be applied 56 + immediately. See :ref:`MEDIA_IOC_REQUEST_ALLOC` for a list of ioctls that 57 + support this. Configurations set with a ``request_fd`` parameter are stored 58 + instead of being immediately applied, and buffers queued to a request do not 59 + enter the regular buffer queue until the request itself is queued. 60 + 61 + Request Submission 62 + ------------------ 63 + 64 + Once the configuration and buffers of the request are specified, it can be 65 + queued by calling :ref:`MEDIA_REQUEST_IOC_QUEUE` on the request file descriptor. 66 + A request must contain at least one buffer, otherwise ``ENOENT`` is returned. 67 + A queued request cannot be modified anymore. 68 + 69 + .. caution:: 70 + For :ref:`memory-to-memory devices <codec>` you can use requests only for 71 + output buffers, not for capture buffers. Attempting to add a capture buffer 72 + to a request will result in an ``EACCES`` error. 
73 + 74 + If the request contains configurations for multiple entities, individual drivers 75 + may synchronize so the requested pipeline's topology is applied before the 76 + buffers are processed. Media controller drivers do a best effort implementation 77 + since perfect atomicity may not be possible due to hardware limitations. 78 + 79 + .. caution:: 80 + 81 + It is not allowed to mix queuing requests with directly queuing buffers: 82 + whichever method is used first locks this in place until 83 + :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` is called or the device is 84 + :ref:`closed <func-close>`. Attempts to directly queue a buffer when earlier 85 + a buffer was queued via a request or vice versa will result in an ``EBUSY`` 86 + error. 87 + 88 + Controls can still be set without a request and are applied immediately, 89 + regardless of whether a request is in use or not. 90 + 91 + .. caution:: 92 + 93 + Setting the same control through a request and also directly can lead to 94 + undefined behavior! 95 + 96 + User-space can :ref:`poll() <request-func-poll>` a request file descriptor in 97 + order to wait until the request completes. A request is considered complete 98 + once all its associated buffers are available for dequeuing and all the 99 + associated controls have been updated with the values at the time of completion. 100 + Note that user-space does not need to wait for the request to complete to 101 + dequeue its buffers: buffers that are available halfway through a request can 102 + be dequeued independently of the request's state. 103 + 104 + A completed request contains the state of the device after the request was 105 + executed. User-space can query that state by calling 106 + :ref:`ioctl VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` with the request file 107 + descriptor. 
Calling :ref:`ioctl VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` for a 108 + request that has been queued but not yet completed will return ``EBUSY`` 109 + since the control values might be changed at any time by the driver while the 110 + request is in flight. 111 + 112 + .. _media-request-life-time: 113 + 114 + Recycling and Destruction 115 + ------------------------- 116 + 117 + Finally, a completed request can either be discarded or be reused. Calling 118 + :ref:`close() <request-func-close>` on a request file descriptor will make 119 + that file descriptor unusable and the request will be freed once it is no 120 + longer in use by the kernel. That is, if the request is queued and then the 121 + file descriptor is closed, then it won't be freed until the driver completed 122 + the request. 123 + 124 + The :ref:`MEDIA_REQUEST_IOC_REINIT` will clear a request's state and make it 125 + available again. No state is retained by this operation: the request is as 126 + if it had just been allocated. 127 + 128 + Example for a Codec Device 129 + -------------------------- 130 + 131 + For use-cases such as :ref:`codecs <codec>`, the request API can be used 132 + to associate specific controls to 133 + be applied by the driver for the OUTPUT buffer, allowing user-space 134 + to queue many such buffers in advance. It can also take advantage of requests' 135 + ability to capture the state of controls when the request completes to read back 136 + information that may be subject to change. 137 + 138 + Put into code, after obtaining a request, user-space can assign controls and one 139 + OUTPUT buffer to it: 140 + 141 + .. code-block:: c 142 + 143 + struct v4l2_buffer buf; 144 + struct v4l2_ext_controls ctrls; 145 + int req_fd; 146 + ... 147 + if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd)) 148 + return errno; 149 + ... 
150 + ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL; 151 + ctrls.request_fd = req_fd; 152 + if (ioctl(codec_fd, VIDIOC_S_EXT_CTRLS, &ctrls)) 153 + return errno; 154 + ... 155 + buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; 156 + buf.flags |= V4L2_BUF_FLAG_REQUEST_FD; 157 + buf.request_fd = req_fd; 158 + if (ioctl(codec_fd, VIDIOC_QBUF, &buf)) 159 + return errno; 160 + 161 + Note that it is not allowed to use the Request API for CAPTURE buffers 162 + since there are no per-frame settings to report there. 163 + 164 + Once the request is fully prepared, it can be queued to the driver: 165 + 166 + .. code-block:: c 167 + 168 + if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE)) 169 + return errno; 170 + 171 + User-space can then either wait for the request to complete by calling poll() on 172 + its file descriptor, or start dequeuing CAPTURE buffers. Most likely, it will 173 + want to get CAPTURE buffers as soon as possible and this can be done using a 174 + regular :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`: 175 + 176 + .. code-block:: c 177 + 178 + struct v4l2_buffer buf; 179 + 180 + memset(&buf, 0, sizeof(buf)); 181 + buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 182 + if (ioctl(codec_fd, VIDIOC_DQBUF, &buf)) 183 + return errno; 184 + 185 + Note that this example assumes for simplicity that for every OUTPUT buffer 186 + there will be one CAPTURE buffer, but this does not have to be the case. 187 + 188 + We can then, after ensuring that the request is completed via polling the 189 + request file descriptor, query control values at the time of its completion via 190 + a call to :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`. 191 + This is particularly useful for volatile controls for which we want to 192 + query values as soon as the capture buffer is produced. 193 + 194 + .. code-block:: c 195 + 196 + struct pollfd pfd = { .events = POLLPRI, .fd = req_fd }; 197 + poll(&pfd, 1, -1); 198 + ... 
199 + ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL; 200 + ctrls.request_fd = req_fd; 201 + if (ioctl(codec_fd, VIDIOC_G_EXT_CTRLS, &ctrls)) 202 + return errno; 203 + 204 + Once we don't need the request anymore, we can either recycle it for reuse with 205 + :ref:`MEDIA_REQUEST_IOC_REINIT`... 206 + 207 + .. code-block:: c 208 + 209 + if (ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT)) 210 + return errno; 211 + 212 + ... or close its file descriptor to completely dispose of it. 213 + 214 + .. code-block:: c 215 + 216 + close(req_fd); 217 + 218 + Example for a Simple Capture Device 219 + ----------------------------------- 220 + 221 + With a simple capture device, requests can be used to specify controls to apply 222 + for a given CAPTURE buffer. 223 + 224 + .. code-block:: c 225 + 226 + struct v4l2_buffer buf; 227 + struct v4l2_ext_controls ctrls; 228 + int req_fd; 229 + ... 230 + if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd)) 231 + return errno; 232 + ... 233 + ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL; 234 + ctrls.request_fd = req_fd; 235 + if (ioctl(camera_fd, VIDIOC_S_EXT_CTRLS, &ctrls)) 236 + return errno; 237 + ... 238 + buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 239 + buf.flags |= V4L2_BUF_FLAG_REQUEST_FD; 240 + buf.request_fd = req_fd; 241 + if (ioctl(camera_fd, VIDIOC_QBUF, &buf)) 242 + return errno; 243 + 244 + Once the request is fully prepared, it can be queued to the driver: 245 + 246 + .. code-block:: c 247 + 248 + if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE)) 249 + return errno; 250 + 251 + User-space can then dequeue buffers, wait for the request completion, query 252 + controls and recycle the request as in the M2M example above.
+49
Documentation/media/uapi/mediactl/request-func-close.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 2 + 3 + .. _request-func-close: 4 + 5 + *************** 6 + request close() 7 + *************** 8 + 9 + Name 10 + ==== 11 + 12 + request-close - Close a request file descriptor 13 + 14 + 15 + Synopsis 16 + ======== 17 + 18 + .. code-block:: c 19 + 20 + #include <unistd.h> 21 + 22 + 23 + .. c:function:: int close( int fd ) 24 + :name: req-close 25 + 26 + Arguments 27 + ========= 28 + 29 + ``fd`` 30 + File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`. 31 + 32 + 33 + Description 34 + =========== 35 + 36 + Closes the request file descriptor. Resources associated with the request 37 + are freed once all file descriptors associated with the request are closed 38 + and the driver has completed the request. 39 + See :ref:`here <media-request-life-time>` for more information. 40 + 41 + 42 + Return Value 43 + ============ 44 + 45 + :ref:`close() <request-func-close>` returns 0 on success. On error, -1 is 46 + returned, and ``errno`` is set appropriately. Possible error codes are: 47 + 48 + EBADF 49 + ``fd`` is not a valid open file descriptor.
+67
Documentation/media/uapi/mediactl/request-func-ioctl.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 2 + 3 + .. _request-func-ioctl: 4 + 5 + *************** 6 + request ioctl() 7 + *************** 8 + 9 + Name 10 + ==== 11 + 12 + request-ioctl - Control a request file descriptor 13 + 14 + 15 + Synopsis 16 + ======== 17 + 18 + .. code-block:: c 19 + 20 + #include <sys/ioctl.h> 21 + 22 + 23 + .. c:function:: int ioctl( int fd, int cmd, void *argp ) 24 + :name: req-ioctl 25 + 26 + Arguments 27 + ========= 28 + 29 + ``fd`` 30 + File descriptor returned by :ref:`MEDIA_IOC_REQUEST_ALLOC`. 31 + 32 + ``cmd`` 33 + The request ioctl command code as defined in the media.h header file, for 34 + example :ref:`MEDIA_REQUEST_IOC_QUEUE`. 35 + 36 + ``argp`` 37 + Pointer to a request-specific structure. 38 + 39 + 40 + Description 41 + =========== 42 + 43 + The :ref:`ioctl() <request-func-ioctl>` function manipulates request 44 + parameters. The argument ``fd`` must be an open file descriptor. 45 + 46 + The ioctl ``cmd`` code specifies the request function to be called. It 47 + has encoded in it whether the argument is an input, output or read/write 48 + parameter, and the size of the argument ``argp`` in bytes. 49 + 50 + Macros and structures definitions specifying request ioctl commands and 51 + their parameters are located in the media.h header file. All request ioctl 52 + commands, their respective function and parameters are specified in 53 + :ref:`media-user-func`. 54 + 55 + 56 + Return Value 57 + ============ 58 + 59 + On success 0 is returned, on error -1 and the ``errno`` variable is set 60 + appropriately. The generic error codes are described at the 61 + :ref:`Generic Error Codes <gen-errors>` chapter. 62 + 63 + Command-specific error codes are listed in the individual command 64 + descriptions. 65 + 66 + When an ioctl that takes an output or read/write parameter fails, the 67 + parameter remains unmodified.
+77
Documentation/media/uapi/mediactl/request-func-poll.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections 2 + 3 + .. _request-func-poll: 4 + 5 + ************** 6 + request poll() 7 + ************** 8 + 9 + Name 10 + ==== 11 + 12 + request-poll - Wait for some event on a file descriptor 13 + 14 + 15 + Synopsis 16 + ======== 17 + 18 + .. code-block:: c 19 + 20 + #include <sys/poll.h> 21 + 22 + 23 + .. c:function:: int poll( struct pollfd *ufds, unsigned int nfds, int timeout ) 24 + :name: request-poll 25 + 26 + Arguments 27 + ========= 28 + 29 + ``ufds`` 30 + List of file descriptor events to be watched 31 + 32 + ``nfds`` 33 + Number of file descriptor events at the \*ufds array 34 + 35 + ``timeout`` 36 + Timeout to wait for events 37 + 38 + 39 + Description 40 + =========== 41 + 42 + With the :c:func:`poll() <request-func-poll>` function applications can wait 43 + for a request to complete. 44 + 45 + On success :c:func:`poll() <request-func-poll>` returns the number of file 46 + descriptors that have been selected (that is, file descriptors for which the 47 + ``revents`` field of the respective struct :c:type:`pollfd` 48 + is non-zero). Request file descriptor set the ``POLLPRI`` flag in ``revents`` 49 + when the request was completed. When the function times out it returns 50 + a value of zero, on failure it returns -1 and the ``errno`` variable is 51 + set appropriately. 52 + 53 + Attempting to poll for a request that is not yet queued will 54 + set the ``POLLERR`` flag in ``revents``. 55 + 56 + 57 + Return Value 58 + ============ 59 + 60 + On success, :c:func:`poll() <request-func-poll>` returns the number of 61 + structures which have non-zero ``revents`` fields, or zero if the call 62 + timed out. On error -1 is returned, and the ``errno`` variable is set 63 + appropriately: 64 + 65 + ``EBADF`` 66 + One or more of the ``ufds`` members specify an invalid file 67 + descriptor. 68 + 69 + ``EFAULT`` 70 + ``ufds`` references an inaccessible memory area. 
71 + 72 + ``EINTR`` 73 + The call was interrupted by a signal. 74 + 75 + ``EINVAL`` 76 + The ``nfds`` value exceeds the ``RLIMIT_NOFILE`` value. Use 77 + ``getrlimit()`` to obtain this value.
+26 -3
Documentation/media/uapi/v4l/buffer.rst
··· 306 306 - A place holder for future extensions. Drivers and applications 307 307 must set this to 0. 308 308 * - __u32 309 - - ``reserved`` 309 + - ``request_fd`` 310 310 - 311 - - A place holder for future extensions. Drivers and applications 312 - must set this to 0. 311 + - The file descriptor of the request to queue the buffer to. If the flag 312 + ``V4L2_BUF_FLAG_REQUEST_FD`` is set, then the buffer will be 313 + queued to this request. If the flag is not set, then this field will 314 + be ignored. 315 + 316 + The ``V4L2_BUF_FLAG_REQUEST_FD`` flag and this field are only used by 317 + :ref:`ioctl VIDIOC_QBUF <VIDIOC_QBUF>` and ignored by other ioctls that 318 + take a :c:type:`v4l2_buffer` as argument. 319 + 320 + Applications should not set ``V4L2_BUF_FLAG_REQUEST_FD`` for any ioctls 321 + other than :ref:`VIDIOC_QBUF <VIDIOC_QBUF>`. 322 + 323 + If the device does not support requests, then ``EACCES`` will be returned. 324 + If requests are supported but an invalid request file descriptor is 325 + given, then ``EINVAL`` will be returned. 313 326 314 327 315 328 ··· 527 514 streaming may continue as normal and the buffer may be reused 528 515 normally. Drivers set this flag when the ``VIDIOC_DQBUF`` ioctl is 529 516 called. 517 + * .. _`V4L2-BUF-FLAG-IN-REQUEST`: 518 + 519 + - ``V4L2_BUF_FLAG_IN_REQUEST`` 520 + - 0x00000080 521 + - This buffer is part of a request that hasn't been queued yet. 530 522 * .. _`V4L2-BUF-FLAG-KEYFRAME`: 531 523 532 524 - ``V4L2_BUF_FLAG_KEYFRAME`` ··· 607 589 the format. Any Any subsequent call to the 608 590 :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl will not block anymore, 609 591 but return an ``EPIPE`` error code. 592 + * .. _`V4L2-BUF-FLAG-REQUEST-FD`: 593 + 594 + - ``V4L2_BUF_FLAG_REQUEST_FD`` 595 + - 0x00800000 596 + - The ``request_fd`` field contains a valid file descriptor. 610 597 * .. _`V4L2-BUF-FLAG-TIMESTAMP-MASK`: 611 598 612 599 - ``V4L2_BUF_FLAG_TIMESTAMP_MASK``
+176
Documentation/media/uapi/v4l/extended-controls.rst
··· 1497 1497 1498 1498 1499 1499 1500 + .. _v4l2-mpeg-mpeg2: 1501 + 1502 + ``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (struct)`` 1503 + Specifies the slice parameters (as extracted from the bitstream) for the 1504 + associated MPEG-2 slice data. This includes the necessary parameters for 1505 + configuring a stateless hardware decoding pipeline for MPEG-2. 1506 + The bitstream parameters are defined according to :ref:`mpeg2part2`. 1507 + 1508 + .. c:type:: v4l2_ctrl_mpeg2_slice_params 1509 + 1510 + .. cssclass:: longtable 1511 + 1512 + .. flat-table:: struct v4l2_ctrl_mpeg2_slice_params 1513 + :header-rows: 0 1514 + :stub-columns: 0 1515 + :widths: 1 1 2 1516 + 1517 + * - __u32 1518 + - ``bit_size`` 1519 + - Size (in bits) of the current slice data. 1520 + * - __u32 1521 + - ``data_bit_offset`` 1522 + - Offset (in bits) to the video data in the current slice data. 1523 + * - struct :c:type:`v4l2_mpeg2_sequence` 1524 + - ``sequence`` 1525 + - Structure with MPEG-2 sequence metadata, merging relevant fields from 1526 + the sequence header and sequence extension parts of the bitstream. 1527 + * - struct :c:type:`v4l2_mpeg2_picture` 1528 + - ``picture`` 1529 + - Structure with MPEG-2 picture metadata, merging relevant fields from 1530 + the picture header and picture coding extension parts of the bitstream. 1531 + * - __u8 1532 + - ``quantiser_scale_code`` 1533 + - Code used to determine the quantization scale to use for the IDCT. 1534 + * - __u8 1535 + - ``backward_ref_index`` 1536 + - Index for the V4L2 buffer to use as backward reference, used with 1537 + B-coded and P-coded frames. 1538 + * - __u8 1539 + - ``forward_ref_index`` 1540 + - Index for the V4L2 buffer to use as forward reference, used with 1541 + B-coded frames. 1542 + 1543 + .. c:type:: v4l2_mpeg2_sequence 1544 + 1545 + .. cssclass:: longtable 1546 + 1547 + .. 
flat-table:: struct v4l2_mpeg2_sequence 1548 + :header-rows: 0 1549 + :stub-columns: 0 1550 + :widths: 1 1 2 1551 + 1552 + * - __u16 1553 + - ``horizontal_size`` 1554 + - The width of the displayable part of the frame's luminance component. 1555 + * - __u16 1556 + - ``vertical_size`` 1557 + - The height of the displayable part of the frame's luminance component. 1558 + * - __u32 1559 + - ``vbv_buffer_size`` 1560 + - Used to calculate the required size of the video buffering verifier, 1561 + defined (in bits) as: 16 * 1024 * vbv_buffer_size. 1562 + * - __u8 1563 + - ``profile_and_level_indication`` 1564 + - The current profile and level indication as extracted from the 1565 + bitstream. 1566 + * - __u8 1567 + - ``progressive_sequence`` 1568 + - Indication that all the frames for the sequence are progressive instead 1569 + of interlaced. 1570 + * - __u8 1571 + - ``chroma_format`` 1572 + - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4). 1573 + 1574 + .. c:type:: v4l2_mpeg2_picture 1575 + 1576 + .. cssclass:: longtable 1577 + 1578 + .. flat-table:: struct v4l2_mpeg2_picture 1579 + :header-rows: 0 1580 + :stub-columns: 0 1581 + :widths: 1 1 2 1582 + 1583 + * - __u8 1584 + - ``picture_coding_type`` 1585 + - Picture coding type for the frame covered by the current slice 1586 + (V4L2_MPEG2_PICTURE_CODING_TYPE_I, V4L2_MPEG2_PICTURE_CODING_TYPE_P or 1587 + V4L2_MPEG2_PICTURE_CODING_TYPE_B). 1588 + * - __u8 1589 + - ``f_code[2][2]`` 1590 + - Motion vector codes. 1591 + * - __u8 1592 + - ``intra_dc_precision`` 1593 + - Precision of Discrete Cosine transform (0: 8 bits precision, 1594 + 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision). 1595 + * - __u8 1596 + - ``picture_structure`` 1597 + - Picture structure (1: interlaced top field, 2: interlaced bottom field, 1598 + 3: progressive frame). 1599 + * - __u8 1600 + - ``top_field_first`` 1601 + - If set to 1 and interlaced stream, top field is output first. 
1602 + * - __u8 1603 + - ``frame_pred_frame_dct`` 1604 + - If set to 1, only frame-DCT and frame prediction are used. 1605 + * - __u8 1606 + - ``concealment_motion_vectors`` 1607 + - If set to 1, motion vectors are coded for intra macroblocks. 1608 + * - __u8 1609 + - ``q_scale_type`` 1610 + - This flag affects the inverse quantization process. 1611 + * - __u8 1612 + - ``intra_vlc_format`` 1613 + - This flag affects the decoding of transform coefficient data. 1614 + * - __u8 1615 + - ``alternate_scan`` 1616 + - This flag affects the decoding of transform coefficient data. 1617 + * - __u8 1618 + - ``repeat_first_field`` 1619 + - This flag affects the decoding process of progressive frames. 1620 + * - __u8 1621 + - ``progressive_frame`` 1622 + - Indicates whether the current frame is progressive. 1623 + 1624 + ``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (struct)`` 1625 + Specifies quantization matrices (as extracted from the bitstream) for the 1626 + associated MPEG-2 slice data. 1627 + 1628 + .. c:type:: v4l2_ctrl_mpeg2_quantization 1629 + 1630 + .. cssclass:: longtable 1631 + 1632 + .. flat-table:: struct v4l2_ctrl_mpeg2_quantization 1633 + :header-rows: 0 1634 + :stub-columns: 0 1635 + :widths: 1 1 2 1636 + 1637 + * - __u8 1638 + - ``load_intra_quantiser_matrix`` 1639 + - One bit to indicate whether to load the ``intra_quantiser_matrix`` data. 1640 + * - __u8 1641 + - ``load_non_intra_quantiser_matrix`` 1642 + - One bit to indicate whether to load the ``non_intra_quantiser_matrix`` 1643 + data. 1644 + * - __u8 1645 + - ``load_chroma_intra_quantiser_matrix`` 1646 + - One bit to indicate whether to load the 1647 + ``chroma_intra_quantiser_matrix`` data, only relevant for non-4:2:0 YUV 1648 + formats. 1649 + * - __u8 1650 + - ``load_chroma_non_intra_quantiser_matrix`` 1651 + - One bit to indicate whether to load the 1652 + ``chroma_non_intra_quantiser_matrix`` data, only relevant for non-4:2:0 1653 + YUV formats. 
1654 + * - __u8 1655 + - ``intra_quantiser_matrix[64]`` 1656 + - The quantization matrix coefficients for intra-coded frames, in zigzag 1657 + scanning order. It is relevant for both luma and chroma components, 1658 + although it can be superseded by the chroma-specific matrix for 1659 + non-4:2:0 YUV formats. 1660 + * - __u8 1661 + - ``non_intra_quantiser_matrix[64]`` 1662 + - The quantization matrix coefficients for non-intra-coded frames, in 1663 + zigzag scanning order. It is relevant for both luma and chroma 1664 + components, although it can be superseded by the chroma-specific matrix 1665 + for non-4:2:0 YUV formats. 1666 + * - __u8 1667 + - ``chroma_intra_quantiser_matrix[64]`` 1668 + - The quantization matrix coefficients for the chrominance component of 1669 + intra-coded frames, in zigzag scanning order. Only relevant for 1670 + non-4:2:0 YUV formats. 1671 + * - __u8 1672 + - ``chroma_non_intra_quantiser_matrix[64]`` 1673 + - The quantization matrix coefficients for the chrominance component of 1674 + non-intra-coded frames, in zigzag scanning order. Only relevant for 1675 + non-4:2:0 YUV formats. 1500 1676 1501 1677 MFC 5.1 MPEG Controls 1502 1678 ---------------------
+16
Documentation/media/uapi/v4l/pixfmt-compressed.rst
··· 60 60 - ``V4L2_PIX_FMT_MPEG2`` 61 61 - 'MPG2' 62 62 - MPEG2 video elementary stream. 63 + * .. _V4L2-PIX-FMT-MPEG2-SLICE: 64 + 65 + - ``V4L2_PIX_FMT_MPEG2_SLICE`` 66 + - 'MG2S' 67 + - MPEG-2 parsed slice data, as extracted from the MPEG-2 bitstream. 68 + This format is adapted for stateless video decoders that implement a 69 + MPEG-2 pipeline (using the :ref:`codec` and :ref:`media-request-api`). 70 + Metadata associated with the frame to decode is required to be passed 71 + through the ``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS`` control and 72 + quantization matrices can optionally be specified through the 73 + ``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION`` control. 74 + See the :ref:`associated Codec Control IDs <v4l2-mpeg-mpeg2>`. 75 + Exactly one output and one capture buffer must be provided for use with 76 + this pixel format. The output buffer must contain the appropriate number 77 + of macroblocks to decode a full corresponding frame to the matching 78 + capture buffer. 63 79 * .. _V4L2-PIX-FMT-MPEG4: 64 80 65 81 - ``V4L2_PIX_FMT_MPEG4``
+13
Documentation/media/uapi/v4l/pixfmt-reserved.rst
··· 243 243 It is an opaque intermediate format and the MDP hardware must be 244 244 used to convert ``V4L2_PIX_FMT_MT21C`` to ``V4L2_PIX_FMT_NV12M``, 245 245 ``V4L2_PIX_FMT_YUV420M`` or ``V4L2_PIX_FMT_YVU420``. 246 + * .. _V4L2-PIX-FMT-SUNXI-TILED-NV12: 246 247 248 + - ``V4L2_PIX_FMT_SUNXI_TILED_NV12`` 249 + - 'ST12' 250 + - Two-planar NV12-based format used by the video engine found on Allwinner 251 + (codenamed sunxi) platforms, with 32x32 tiles for the luminance plane 252 + and 32x64 tiles for the chrominance plane. The data in each tile is 253 + stored in linear order, within the tile bounds. Each tile follows the 254 + previous one linearly in memory (from left to right, top to bottom). 255 + 256 + The associated buffer dimensions are aligned to match an integer number 257 + of tiles, resulting in 32-aligned resolutions for the luminance plane 258 + and 16-aligned resolutions for the chrominance plane (with 2x2 259 + subsampling). 247 260 248 261 .. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}| 249 262
+13 -1
Documentation/media/uapi/v4l/vidioc-create-bufs.rst
··· 102 102 - ``format`` 103 103 - Filled in by the application, preserved by the driver. 104 104 * - __u32 105 - - ``reserved``\ [8] 105 + - ``capabilities`` 106 + - Set by the driver. If 0, then the driver doesn't support 107 + capabilities. In that case all you know is that the driver is 108 + guaranteed to support ``V4L2_MEMORY_MMAP`` and *might* support 109 + other :c:type:`v4l2_memory` types. It will not support any other 110 + capabilities. See :ref:`here <v4l2-buf-capabilities>` for a list of the 111 + capabilities. 112 + 113 + If you want to just query the capabilities without making any 114 + other changes, then set ``count`` to 0, ``memory`` to 115 + ``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type. 116 + * - __u32 117 + - ``reserved``\ [7] 106 118 - A place holder for future extensions. Drivers and applications 107 119 must set the array to zero. 108 120
+49 -10
Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
··· 95 95 is inappropriate (e.g. the given menu index is not supported by the menu 96 96 control), then this will also result in an ``EINVAL`` error code error. 97 97 98 + If ``request_fd`` is set to a not-yet-queued :ref:`request <media-request-api>` 99 + file descriptor and ``which`` is set to ``V4L2_CTRL_WHICH_REQUEST_VAL``, 100 + then the controls are not applied immediately when calling 101 + :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`, but instead are applied by 102 + the driver for the buffer associated with the same request. 103 + If the device does not support requests, then ``EACCES`` will be returned. 104 + If requests are supported but an invalid request file descriptor is given, 105 + then ``EINVAL`` will be returned. 106 + 107 + An attempt to call :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` for a 108 + request that has already been queued will result in an ``EBUSY`` error. 109 + 110 + If ``request_fd`` is specified and ``which`` is set to 111 + ``V4L2_CTRL_WHICH_REQUEST_VAL`` during a call to 112 + :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`, then it will return the 113 + values of the controls at the time of request completion. 114 + If the request is not yet completed, then this will result in an 115 + ``EACCES`` error. 116 + 98 117 The driver will only set/get these controls if all control values are 99 118 correct. This prevents the situation where only some of the controls 100 119 were set/get. Only low-level errors (e. g. a failed i2c command) can ··· 228 209 - ``which`` 229 210 - Which value of the control to get/set/try. 230 211 ``V4L2_CTRL_WHICH_CUR_VAL`` will return the current value of the 231 - control and ``V4L2_CTRL_WHICH_DEF_VAL`` will return the default 232 - value of the control. 212 + control, ``V4L2_CTRL_WHICH_DEF_VAL`` will return the default 213 + value of the control and ``V4L2_CTRL_WHICH_REQUEST_VAL`` indicates that 214 + these controls have to be retrieved from a request or tried/set for 215 + a request. 
In the latter case the ``request_fd`` field contains the 216 + file descriptor of the request that should be used. If the device 217 + does not support requests, then ``EACCES`` will be returned. 233 218 234 219 .. note:: 235 220 236 - You can only get the default value of the control, 237 - you cannot set or try it. 221 + When using ``V4L2_CTRL_WHICH_DEF_VAL`` be aware that you can only 222 + get the default value of the control, you cannot set or try it. 238 223 239 224 For backwards compatibility you can also use a control class here 240 225 (see :ref:`ctrl-class`). In that case all controls have to ··· 295 272 then you can call :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` to try to discover the 296 273 actual control that failed the validation step. Unfortunately, 297 274 there is no ``TRY`` equivalent for :ref:`VIDIOC_G_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>`. 275 + * - __s32 276 + - ``request_fd`` 277 + - File descriptor of the request to be used by this operation. Only 278 + valid if ``which`` is set to ``V4L2_CTRL_WHICH_REQUEST_VAL``. 279 + If the device does not support requests, then ``EACCES`` will be returned. 280 + If requests are supported but an invalid request file descriptor is 281 + given, then ``EINVAL`` will be returned. 298 282 * - __u32 299 - - ``reserved``\ [2] 283 + - ``reserved``\ [1] 300 284 - Reserved for future extensions. 301 285 302 286 Drivers and applications must set the array to zero. ··· 377 347 378 348 EINVAL 379 349 The struct :c:type:`v4l2_ext_control` ``id`` is 380 - invalid, the struct :c:type:`v4l2_ext_controls` 350 + invalid, or the struct :c:type:`v4l2_ext_controls` 381 351 ``which`` is invalid, or the struct 382 352 :c:type:`v4l2_ext_control` ``value`` was 383 353 inappropriate (e.g. the given menu index is not supported by the 384 - driver). 
This error code is also returned by the 354 + driver), or the ``which`` field was set to ``V4L2_CTRL_WHICH_REQUEST_VAL`` 355 + but the given ``request_fd`` was invalid or ``V4L2_CTRL_WHICH_REQUEST_VAL`` 356 + is not supported by the kernel. 357 + This error code is also returned by the 385 358 :ref:`VIDIOC_S_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` and :ref:`VIDIOC_TRY_EXT_CTRLS <VIDIOC_G_EXT_CTRLS>` ioctls if two or 386 359 more control values are in conflict. 387 360 ··· 395 362 EBUSY 396 363 The control is temporarily not changeable, possibly because another 397 364 applications took over control of the device function this control 398 - belongs to. 365 + belongs to, or (if the ``which`` field was set to 366 + ``V4L2_CTRL_WHICH_REQUEST_VAL``) the request was queued but not yet 367 + completed. 399 368 400 369 ENOSPC 401 370 The space reserved for the control's payload is insufficient. The ··· 405 370 and this error code is returned. 406 371 407 372 EACCES 408 - Attempt to try or set a read-only control or to get a write-only 409 - control. 373 + Attempt to try or set a read-only control, or to get a write-only 374 + control, or to get a control from a request that has not yet been 375 + completed. 376 + 377 + Or the ``which`` field was set to ``V4L2_CTRL_WHICH_REQUEST_VAL`` but the 378 + device does not support requests.
+35 -2
Documentation/media/uapi/v4l/vidioc-qbuf.rst
··· 65 65 with a pointer to this structure the driver sets the 66 66 ``V4L2_BUF_FLAG_MAPPED`` and ``V4L2_BUF_FLAG_QUEUED`` flags and clears 67 67 the ``V4L2_BUF_FLAG_DONE`` flag in the ``flags`` field, or it returns an 68 - EINVAL error code. 68 + ``EINVAL`` error code. 69 69 70 70 To enqueue a :ref:`user pointer <userp>` buffer applications set the 71 71 ``memory`` field to ``V4L2_MEMORY_USERPTR``, the ``m.userptr`` field to ··· 97 97 dequeued, until the :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` or 98 98 :ref:`VIDIOC_REQBUFS` ioctl is called, or until the 99 99 device is closed. 100 + 101 + The ``request_fd`` field can be used with the ``VIDIOC_QBUF`` ioctl to specify 102 + the file descriptor of a :ref:`request <media-request-api>`, if requests are 103 + in use. Setting it means that the buffer will not be passed to the driver 104 + until the request itself is queued. Also, the driver will apply any 105 + settings associated with the request for this buffer. This field will 106 + be ignored unless the ``V4L2_BUF_FLAG_REQUEST_FD`` flag is set. 107 + If the device does not support requests, then ``EACCES`` will be returned. 108 + If requests are supported but an invalid request file descriptor is given, 109 + then ``EINVAL`` will be returned. 110 + 111 + .. caution:: 112 + It is not allowed to mix queuing requests with queuing buffers directly. 113 + ``EBUSY`` will be returned if the first buffer was queued directly and 114 + then the application tries to queue a request, or vice versa. After 115 + closing the file descriptor, calling 116 + :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` or calling :ref:`VIDIOC_REQBUFS` 117 + the check for this will be reset. 118 + 119 + For :ref:`memory-to-memory devices <codec>` you can specify the 120 + ``request_fd`` only for output buffers, not for capture buffers. Attempting 121 + to specify this for a capture buffer will result in an ``EACCES`` error. 
100 122 101 123 Applications call the ``VIDIOC_DQBUF`` ioctl to dequeue a filled 102 124 (capturing) or displayed (output) buffer from the driver's outgoing ··· 155 133 EINVAL 156 134 The buffer ``type`` is not supported, or the ``index`` is out of 157 135 bounds, or no buffers have been allocated yet, or the ``userptr`` or 158 136 ``length`` are invalid, or the ``V4L2_BUF_FLAG_REQUEST_FD`` flag was 137 + set but the given ``request_fd`` was invalid, or ``m.fd`` was 138 + an invalid DMABUF file descriptor. 159 139 160 140 EIO 161 141 ``VIDIOC_DQBUF`` failed due to an internal error. Can also indicate ··· 177 153 ``VIDIOC_DQBUF`` returns this on an empty capture queue for mem2mem 178 154 codecs if a buffer with the ``V4L2_BUF_FLAG_LAST`` was already 179 155 dequeued and no new buffers are expected to become available. 156 + 157 + EACCES 158 + The ``V4L2_BUF_FLAG_REQUEST_FD`` flag was set but the device does not 159 + support requests for the given buffer type. 160 + 161 + EBUSY 162 + The first buffer was queued via a request, but the application now tries 163 + to queue it directly, or vice versa (it is not permitted to mix the two 164 + APIs).
+12 -2
Documentation/media/uapi/v4l/vidioc-queryctrl.rst
··· 424 424 - any 425 425 - An unsigned 32-bit valued control ranging from minimum to maximum 426 426 inclusive. The step value indicates the increment between values. 427 - 428 - 427 + * - ``V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS`` 428 + - n/a 429 + - n/a 430 + - n/a 431 + - A struct :c:type:`v4l2_ctrl_mpeg2_slice_params`, containing MPEG-2 432 + slice parameters for stateless video decoders. 433 + * - ``V4L2_CTRL_TYPE_MPEG2_QUANTIZATION`` 434 + - n/a 435 + - n/a 436 + - n/a 437 + - A struct :c:type:`v4l2_ctrl_mpeg2_quantization`, containing MPEG-2 438 + quantization matrices for stateless video decoders. 429 439 430 440 .. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}| 431 441
+41 -1
Documentation/media/uapi/v4l/vidioc-reqbufs.rst
··· 88 88 ``V4L2_MEMORY_DMABUF`` or ``V4L2_MEMORY_USERPTR``. See 89 89 :c:type:`v4l2_memory`. 90 90 * - __u32 91 - - ``reserved``\ [2] 91 + - ``capabilities`` 92 + - Set by the driver. If 0, then the driver doesn't support 93 + capabilities. In that case all you know is that the driver is 94 + guaranteed to support ``V4L2_MEMORY_MMAP`` and *might* support 95 + other :c:type:`v4l2_memory` types. It will not support any other 96 + capabilities. 97 + 98 + If you want to query the capabilities with a minimum of side-effects, 99 + then this can be called with ``count`` set to 0, ``memory`` set to 100 + ``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type. This will 101 + free any previously allocated buffers, so this is typically something 102 + that will be done at the start of the application. 103 + * - __u32 104 + - ``reserved``\ [1] 92 105 - A place holder for future extensions. Drivers and applications 93 106 must set the array to zero. 94 107 108 + .. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}| 109 + 110 + .. _v4l2-buf-capabilities: 111 + .. _V4L2-BUF-CAP-SUPPORTS-MMAP: 112 + .. _V4L2-BUF-CAP-SUPPORTS-USERPTR: 113 + .. _V4L2-BUF-CAP-SUPPORTS-DMABUF: 114 + .. _V4L2-BUF-CAP-SUPPORTS-REQUESTS: 115 + 116 + .. cssclass:: longtable 117 + 118 + .. flat-table:: V4L2 Buffer Capabilities Flags 119 + :header-rows: 0 120 + :stub-columns: 0 121 + :widths: 3 1 4 122 + 123 + * - ``V4L2_BUF_CAP_SUPPORTS_MMAP`` 124 + - 0x00000001 125 + - This buffer type supports the ``V4L2_MEMORY_MMAP`` streaming mode. 126 + * - ``V4L2_BUF_CAP_SUPPORTS_USERPTR`` 127 + - 0x00000002 128 + - This buffer type supports the ``V4L2_MEMORY_USERPTR`` streaming mode. 129 + * - ``V4L2_BUF_CAP_SUPPORTS_DMABUF`` 130 + - 0x00000004 131 + - This buffer type supports the ``V4L2_MEMORY_DMABUF`` streaming mode. 132 + * - ``V4L2_BUF_CAP_SUPPORTS_REQUESTS`` 133 + - 0x00000008 134 + - This buffer type supports :ref:`requests <media-request-api>`. 95 135 96 136 Return Value 97 137 ============
+3
Documentation/media/videodev2.h.rst.exceptions
··· 131 131 replace symbol V4L2_CTRL_TYPE_U16 :c:type:`v4l2_ctrl_type` 132 132 replace symbol V4L2_CTRL_TYPE_U32 :c:type:`v4l2_ctrl_type` 133 133 replace symbol V4L2_CTRL_TYPE_U8 :c:type:`v4l2_ctrl_type` 134 + replace symbol V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS :c:type:`v4l2_ctrl_type` 135 + replace symbol V4L2_CTRL_TYPE_MPEG2_QUANTIZATION :c:type:`v4l2_ctrl_type` 134 136 135 137 # V4L2 capability defines 136 138 replace define V4L2_CAP_VIDEO_CAPTURE device-capabilities ··· 519 517 ignore define V4L2_CTRL_MAX_DIMS 520 518 ignore define V4L2_CTRL_WHICH_CUR_VAL 521 519 ignore define V4L2_CTRL_WHICH_DEF_VAL 520 + ignore define V4L2_CTRL_WHICH_REQUEST_VAL 522 521 ignore define V4L2_OUT_CAP_CUSTOM_TIMINGS 523 522 ignore define V4L2_CID_MAX_CTRLS 524 523
+7
MAINTAINERS
··· 671 671 S: Maintained 672 672 F: drivers/crypto/sunxi-ss/ 673 673 674 + ALLWINNER VPU DRIVER 675 + M: Maxime Ripard <maxime.ripard@bootlin.com> 676 + M: Paul Kocialkowski <paul.kocialkowski@bootlin.com> 677 + L: linux-media@vger.kernel.org 678 + S: Maintained 679 + F: drivers/staging/media/sunxi/cedrus/ 680 + 674 681 ALPHA PORT 675 682 M: Richard Henderson <rth@twiddle.net> 676 683 M: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+2 -1
drivers/media/Makefile
··· 3 3 # Makefile for the kernel multimedia device drivers. 4 4 # 5 5 6 - media-objs := media-device.o media-devnode.o media-entity.o 6 + media-objs := media-device.o media-devnode.o media-entity.o \ 7 + media-request.o 7 8 8 9 # 9 10 # I2C drivers should come before other drivers, otherwise they'll fail
+218 -42
drivers/media/common/videobuf2/videobuf2-core.c
··· 356 356 vb->planes[plane].length = plane_sizes[plane]; 357 357 vb->planes[plane].min_length = plane_sizes[plane]; 358 358 } 359 + call_void_bufop(q, init_buffer, vb); 360 + 359 361 q->bufs[vb->index] = vb; 360 362 361 363 /* Allocate video buffer memory for the MMAP type */ ··· 499 497 pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", 500 498 vb->cnt_buf_init, vb->cnt_buf_cleanup, 501 499 vb->cnt_buf_prepare, vb->cnt_buf_finish); 502 - pr_info(" buf_queue: %u buf_done: %u\n", 503 - vb->cnt_buf_queue, vb->cnt_buf_done); 500 + pr_info(" buf_queue: %u buf_done: %u buf_request_complete: %u\n", 501 + vb->cnt_buf_queue, vb->cnt_buf_done, 502 + vb->cnt_buf_request_complete); 504 503 pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", 505 504 vb->cnt_mem_alloc, vb->cnt_mem_put, 506 505 vb->cnt_mem_prepare, vb->cnt_mem_finish, ··· 686 683 } 687 684 688 685 /* 689 - * Call queue_cancel to clean up any buffers in the PREPARED or 686 + * Call queue_cancel to clean up any buffers in the 690 687 * QUEUED state which is possible if buffers were prepared or 691 688 * queued without ever calling STREAMON. 
692 689 */ ··· 933 930 /* sync buffers */ 934 931 for (plane = 0; plane < vb->num_planes; ++plane) 935 932 call_void_memop(vb, finish, vb->planes[plane].mem_priv); 933 + vb->synced = false; 936 934 } 937 935 938 936 spin_lock_irqsave(&q->done_lock, flags); ··· 946 942 vb->state = state; 947 943 } 948 944 atomic_dec(&q->owned_by_drv_count); 945 + 946 + if (vb->req_obj.req) { 947 + /* This is not supported at the moment */ 948 + WARN_ON(state == VB2_BUF_STATE_REQUEUEING); 949 + media_request_object_unbind(&vb->req_obj); 950 + media_request_object_put(&vb->req_obj); 951 + } 952 + 949 953 spin_unlock_irqrestore(&q->done_lock, flags); 950 954 951 955 trace_vb2_buf_done(q, vb); ··· 988 976 /* 989 977 * __prepare_mmap() - prepare an MMAP buffer 990 978 */ 991 - static int __prepare_mmap(struct vb2_buffer *vb, const void *pb) 979 + static int __prepare_mmap(struct vb2_buffer *vb) 992 980 { 993 981 int ret = 0; 994 982 995 - if (pb) 996 - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 997 - vb, pb, vb->planes); 983 + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 984 + vb, vb->planes); 998 985 return ret ? 
ret : call_vb_qop(vb, buf_prepare, vb); 999 986 } 1000 987 1001 988 /* 1002 989 * __prepare_userptr() - prepare a USERPTR buffer 1003 990 */ 1004 - static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) 991 + static int __prepare_userptr(struct vb2_buffer *vb) 1005 992 { 1006 993 struct vb2_plane planes[VB2_MAX_PLANES]; 1007 994 struct vb2_queue *q = vb->vb2_queue; ··· 1011 1000 1012 1001 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1013 1002 /* Copy relevant information provided by the userspace */ 1014 - if (pb) { 1015 - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1016 - vb, pb, planes); 1017 - if (ret) 1018 - return ret; 1019 - } 1003 + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1004 + vb, planes); 1005 + if (ret) 1006 + return ret; 1020 1007 1021 1008 for (plane = 0; plane < vb->num_planes; ++plane) { 1022 1009 /* Skip the plane if already verified */ ··· 1114 1105 /* 1115 1106 * __prepare_dmabuf() - prepare a DMABUF buffer 1116 1107 */ 1117 - static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) 1108 + static int __prepare_dmabuf(struct vb2_buffer *vb) 1118 1109 { 1119 1110 struct vb2_plane planes[VB2_MAX_PLANES]; 1120 1111 struct vb2_queue *q = vb->vb2_queue; ··· 1125 1116 1126 1117 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1127 1118 /* Copy relevant information provided by the userspace */ 1128 - if (pb) { 1129 - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1130 - vb, pb, planes); 1131 - if (ret) 1132 - return ret; 1133 - } 1119 + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1120 + vb, planes); 1121 + if (ret) 1122 + return ret; 1134 1123 1135 1124 for (plane = 0; plane < vb->num_planes; ++plane) { 1136 1125 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); ··· 1257 1250 call_void_vb_qop(vb, buf_queue, vb); 1258 1251 } 1259 1252 1260 - static int __buf_prepare(struct vb2_buffer *vb, const void *pb) 1253 + static int __buf_prepare(struct vb2_buffer *vb) 1261 1254 { 1262 1255 struct 
vb2_queue *q = vb->vb2_queue; 1256 + enum vb2_buffer_state orig_state = vb->state; 1263 1257 unsigned int plane; 1264 1258 int ret; 1265 1259 ··· 1269 1261 return -EIO; 1270 1262 } 1271 1263 1264 + if (vb->prepared) 1265 + return 0; 1266 + WARN_ON(vb->synced); 1267 + 1272 1268 vb->state = VB2_BUF_STATE_PREPARING; 1273 1269 1274 1270 switch (q->memory) { 1275 1271 case VB2_MEMORY_MMAP: 1276 - ret = __prepare_mmap(vb, pb); 1272 + ret = __prepare_mmap(vb); 1277 1273 break; 1278 1274 case VB2_MEMORY_USERPTR: 1279 - ret = __prepare_userptr(vb, pb); 1275 + ret = __prepare_userptr(vb); 1280 1276 break; 1281 1277 case VB2_MEMORY_DMABUF: 1282 - ret = __prepare_dmabuf(vb, pb); 1278 + ret = __prepare_dmabuf(vb); 1283 1279 break; 1284 1280 default: 1285 1281 WARN(1, "Invalid queue type\n"); 1286 1282 ret = -EINVAL; 1283 + break; 1287 1284 } 1288 1285 1289 1286 if (ret) { 1290 1287 dprintk(1, "buffer preparation failed: %d\n", ret); 1291 - vb->state = VB2_BUF_STATE_DEQUEUED; 1288 + vb->state = orig_state; 1292 1289 return ret; 1293 1290 } 1294 1291 ··· 1301 1288 for (plane = 0; plane < vb->num_planes; ++plane) 1302 1289 call_void_memop(vb, prepare, vb->planes[plane].mem_priv); 1303 1290 1304 - vb->state = VB2_BUF_STATE_PREPARED; 1291 + vb->synced = true; 1292 + vb->prepared = true; 1293 + vb->state = orig_state; 1305 1294 1306 1295 return 0; 1307 1296 } 1297 + 1298 + static int vb2_req_prepare(struct media_request_object *obj) 1299 + { 1300 + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1301 + int ret; 1302 + 1303 + if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) 1304 + return -EINVAL; 1305 + 1306 + mutex_lock(vb->vb2_queue->lock); 1307 + ret = __buf_prepare(vb); 1308 + mutex_unlock(vb->vb2_queue->lock); 1309 + return ret; 1310 + } 1311 + 1312 + static void __vb2_dqbuf(struct vb2_buffer *vb); 1313 + 1314 + static void vb2_req_unprepare(struct media_request_object *obj) 1315 + { 1316 + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, 
req_obj); 1317 + 1318 + mutex_lock(vb->vb2_queue->lock); 1319 + __vb2_dqbuf(vb); 1320 + vb->state = VB2_BUF_STATE_IN_REQUEST; 1321 + mutex_unlock(vb->vb2_queue->lock); 1322 + WARN_ON(!vb->req_obj.req); 1323 + } 1324 + 1325 + int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, 1326 + struct media_request *req); 1327 + 1328 + static void vb2_req_queue(struct media_request_object *obj) 1329 + { 1330 + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1331 + 1332 + mutex_lock(vb->vb2_queue->lock); 1333 + vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL); 1334 + mutex_unlock(vb->vb2_queue->lock); 1335 + } 1336 + 1337 + static void vb2_req_unbind(struct media_request_object *obj) 1338 + { 1339 + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1340 + 1341 + if (vb->state == VB2_BUF_STATE_IN_REQUEST) 1342 + call_void_bufop(vb->vb2_queue, init_buffer, vb); 1343 + } 1344 + 1345 + static void vb2_req_release(struct media_request_object *obj) 1346 + { 1347 + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1348 + 1349 + if (vb->state == VB2_BUF_STATE_IN_REQUEST) 1350 + vb->state = VB2_BUF_STATE_DEQUEUED; 1351 + } 1352 + 1353 + static const struct media_request_object_ops vb2_core_req_ops = { 1354 + .prepare = vb2_req_prepare, 1355 + .unprepare = vb2_req_unprepare, 1356 + .queue = vb2_req_queue, 1357 + .unbind = vb2_req_unbind, 1358 + .release = vb2_req_release, 1359 + }; 1360 + 1361 + bool vb2_request_object_is_buffer(struct media_request_object *obj) 1362 + { 1363 + return obj->ops == &vb2_core_req_ops; 1364 + } 1365 + EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer); 1366 + 1367 + unsigned int vb2_request_buffer_cnt(struct media_request *req) 1368 + { 1369 + struct media_request_object *obj; 1370 + unsigned long flags; 1371 + unsigned int buffer_cnt = 0; 1372 + 1373 + spin_lock_irqsave(&req->lock, flags); 1374 + list_for_each_entry(obj, &req->objects, list) 1375 + if 
(vb2_request_object_is_buffer(obj)) 1376 + buffer_cnt++; 1377 + spin_unlock_irqrestore(&req->lock, flags); 1378 + 1379 + return buffer_cnt; 1380 + } 1381 + EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt); 1308 1382 1309 1383 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) 1310 1384 { ··· 1404 1304 vb->state); 1405 1305 return -EINVAL; 1406 1306 } 1307 + if (vb->prepared) { 1308 + dprintk(1, "buffer already prepared\n"); 1309 + return -EINVAL; 1310 + } 1407 1311 1408 - ret = __buf_prepare(vb, pb); 1312 + ret = __buf_prepare(vb); 1409 1313 if (ret) 1410 1314 return ret; 1411 1315 ··· 1418 1314 1419 1315 dprintk(2, "prepare of buffer %d succeeded\n", vb->index); 1420 1316 1421 - return ret; 1317 + return 0; 1422 1318 } 1423 1319 EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); 1424 1320 ··· 1485 1381 return ret; 1486 1382 } 1487 1383 1488 - int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) 1384 + int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, 1385 + struct media_request *req) 1489 1386 { 1490 1387 struct vb2_buffer *vb; 1491 1388 int ret; ··· 1498 1393 1499 1394 vb = q->bufs[index]; 1500 1395 1501 - switch (vb->state) { 1502 - case VB2_BUF_STATE_DEQUEUED: 1503 - ret = __buf_prepare(vb, pb); 1396 + if ((req && q->uses_qbuf) || 1397 + (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && 1398 + q->uses_requests)) { 1399 + dprintk(1, "queue in wrong mode (qbuf vs requests)\n"); 1400 + return -EBUSY; 1401 + } 1402 + 1403 + if (req) { 1404 + int ret; 1405 + 1406 + q->uses_requests = 1; 1407 + if (vb->state != VB2_BUF_STATE_DEQUEUED) { 1408 + dprintk(1, "buffer %d not in dequeued state\n", 1409 + vb->index); 1410 + return -EINVAL; 1411 + } 1412 + 1413 + media_request_object_init(&vb->req_obj); 1414 + 1415 + /* Make sure the request is in a safe state for updating. 
*/ 1416 + ret = media_request_lock_for_update(req); 1504 1417 if (ret) 1505 1418 return ret; 1506 - break; 1507 - case VB2_BUF_STATE_PREPARED: 1419 + ret = media_request_object_bind(req, &vb2_core_req_ops, 1420 + q, true, &vb->req_obj); 1421 + media_request_unlock_for_update(req); 1422 + if (ret) 1423 + return ret; 1424 + 1425 + vb->state = VB2_BUF_STATE_IN_REQUEST; 1426 + /* Fill buffer information for the userspace */ 1427 + if (pb) { 1428 + call_void_bufop(q, copy_timestamp, vb, pb); 1429 + call_void_bufop(q, fill_user_buffer, vb, pb); 1430 + } 1431 + 1432 + dprintk(2, "qbuf of buffer %d succeeded\n", vb->index); 1433 + return 0; 1434 + } 1435 + 1436 + if (vb->state != VB2_BUF_STATE_IN_REQUEST) 1437 + q->uses_qbuf = 1; 1438 + 1439 + switch (vb->state) { 1440 + case VB2_BUF_STATE_DEQUEUED: 1441 + case VB2_BUF_STATE_IN_REQUEST: 1442 + if (!vb->prepared) { 1443 + ret = __buf_prepare(vb); 1444 + if (ret) 1445 + return ret; 1446 + } 1508 1447 break; 1509 1448 case VB2_BUF_STATE_PREPARING: 1510 1449 dprintk(1, "buffer still being prepared\n"); ··· 1749 1600 call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv); 1750 1601 vb->planes[i].dbuf_mapped = 0; 1751 1602 } 1603 + if (vb->req_obj.req) { 1604 + media_request_object_unbind(&vb->req_obj); 1605 + media_request_object_put(&vb->req_obj); 1606 + } 1607 + call_void_bufop(q, init_buffer, vb); 1752 1608 } 1753 1609 1754 1610 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, ··· 1779 1625 } 1780 1626 1781 1627 call_void_vb_qop(vb, buf_finish, vb); 1628 + vb->prepared = false; 1782 1629 1783 1630 if (pindex) 1784 1631 *pindex = vb->index; ··· 1843 1688 q->start_streaming_called = 0; 1844 1689 q->queued_count = 0; 1845 1690 q->error = 0; 1691 + q->uses_requests = 0; 1692 + q->uses_qbuf = 0; 1846 1693 1847 1694 /* 1848 1695 * Remove all buffers from videobuf's list... 
··· 1869 1712 */ 1870 1713 for (i = 0; i < q->num_buffers; ++i) { 1871 1714 struct vb2_buffer *vb = q->bufs[i]; 1715 + struct media_request *req = vb->req_obj.req; 1872 1716 1873 - if (vb->state == VB2_BUF_STATE_PREPARED || 1874 - vb->state == VB2_BUF_STATE_QUEUED) { 1717 + /* 1718 + * If a request is associated with this buffer, then 1719 + * call buf_request_cancel() to give the driver to complete() 1720 + * related request objects. Otherwise those objects would 1721 + * never complete. 1722 + */ 1723 + if (req) { 1724 + enum media_request_state state; 1725 + unsigned long flags; 1726 + 1727 + spin_lock_irqsave(&req->lock, flags); 1728 + state = req->state; 1729 + spin_unlock_irqrestore(&req->lock, flags); 1730 + 1731 + if (state == MEDIA_REQUEST_STATE_QUEUED) 1732 + call_void_vb_qop(vb, buf_request_complete, vb); 1733 + } 1734 + 1735 + if (vb->synced) { 1875 1736 unsigned int plane; 1876 1737 1877 1738 for (plane = 0; plane < vb->num_planes; ++plane) 1878 1739 call_void_memop(vb, finish, 1879 1740 vb->planes[plane].mem_priv); 1741 + vb->synced = false; 1880 1742 } 1881 1743 1882 - if (vb->state != VB2_BUF_STATE_DEQUEUED) { 1883 - vb->state = VB2_BUF_STATE_PREPARED; 1744 + if (vb->prepared) { 1884 1745 call_void_vb_qop(vb, buf_finish, vb); 1746 + vb->prepared = false; 1885 1747 } 1886 1748 __vb2_dqbuf(vb); 1887 1749 } ··· 2457 2281 * Queue all buffers. 
2458 2282 */ 2459 2283 for (i = 0; i < q->num_buffers; i++) { 2460 - ret = vb2_core_qbuf(q, i, NULL); 2284 + ret = vb2_core_qbuf(q, i, NULL, NULL); 2461 2285 if (ret) 2462 2286 goto err_reqbufs; 2463 2287 fileio->bufs[i].queued = 1; ··· 2636 2460 2637 2461 if (copy_timestamp) 2638 2462 b->timestamp = ktime_get_ns(); 2639 - ret = vb2_core_qbuf(q, index, NULL); 2463 + ret = vb2_core_qbuf(q, index, NULL, NULL); 2640 2464 dprintk(5, "vb2_dbuf result: %d\n", ret); 2641 2465 if (ret) 2642 2466 return ret; ··· 2739 2563 if (copy_timestamp) 2740 2564 vb->timestamp = ktime_get_ns(); 2741 2565 if (!threadio->stop) 2742 - ret = vb2_core_qbuf(q, vb->index, NULL); 2566 + ret = vb2_core_qbuf(q, vb->index, NULL, NULL); 2743 2567 call_void_qop(q, wait_prepare, q); 2744 2568 if (ret || threadio->stop) 2745 2569 break;
+373 -155
drivers/media/common/videobuf2/videobuf2-v4l2.c
··· 25 25 #include <linux/kthread.h> 26 26 27 27 #include <media/v4l2-dev.h> 28 + #include <media/v4l2-device.h> 28 29 #include <media/v4l2-fh.h> 29 30 #include <media/v4l2-event.h> 30 31 #include <media/v4l2-common.h> ··· 41 40 pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \ 42 41 } while (0) 43 42 44 - /* Flags that are set by the vb2 core */ 43 + /* Flags that are set by us */ 45 44 #define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ 46 45 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \ 47 46 V4L2_BUF_FLAG_PREPARED | \ 47 + V4L2_BUF_FLAG_IN_REQUEST | \ 48 + V4L2_BUF_FLAG_REQUEST_FD | \ 48 49 V4L2_BUF_FLAG_TIMESTAMP_MASK) 49 50 /* Output buffer flags that should be passed on to the driver */ 50 51 #define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \ ··· 121 118 return 0; 122 119 } 123 120 121 + /* 122 + * __init_v4l2_vb2_buffer() - initialize the v4l2_vb2_buffer struct 123 + */ 124 + static void __init_v4l2_vb2_buffer(struct vb2_buffer *vb) 125 + { 126 + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 127 + 128 + vbuf->request_fd = -1; 129 + } 130 + 124 131 static void __copy_timestamp(struct vb2_buffer *vb, const void *pb) 125 132 { 126 133 const struct v4l2_buffer *b = pb; ··· 167 154 pr_warn("use the actual size instead.\n"); 168 155 } 169 156 170 - static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, 171 - const char *opname) 157 + static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) 172 158 { 159 + struct vb2_queue *q = vb->vb2_queue; 160 + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 161 + struct vb2_plane *planes = vbuf->planes; 162 + unsigned int plane; 163 + int ret; 164 + 165 + ret = __verify_length(vb, b); 166 + if (ret < 0) { 167 + dprintk(1, "plane parameters verification failed: %d\n", ret); 168 + return ret; 169 + } 170 + if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) { 171 + /* 172 + * If the format's field is 
ALTERNATE, then the buffer's field 173 + * should be either TOP or BOTTOM, not ALTERNATE since that 174 + * makes no sense. The driver has to know whether the 175 + * buffer represents a top or a bottom field in order to 176 + * program any DMA correctly. Using ALTERNATE is wrong, since 177 + * that just says that it is either a top or a bottom field, 178 + * but not which of the two it is. 179 + */ 180 + dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); 181 + return -EINVAL; 182 + } 183 + vbuf->sequence = 0; 184 + vbuf->request_fd = -1; 185 + 186 + if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { 187 + switch (b->memory) { 188 + case VB2_MEMORY_USERPTR: 189 + for (plane = 0; plane < vb->num_planes; ++plane) { 190 + planes[plane].m.userptr = 191 + b->m.planes[plane].m.userptr; 192 + planes[plane].length = 193 + b->m.planes[plane].length; 194 + } 195 + break; 196 + case VB2_MEMORY_DMABUF: 197 + for (plane = 0; plane < vb->num_planes; ++plane) { 198 + planes[plane].m.fd = 199 + b->m.planes[plane].m.fd; 200 + planes[plane].length = 201 + b->m.planes[plane].length; 202 + } 203 + break; 204 + default: 205 + for (plane = 0; plane < vb->num_planes; ++plane) { 206 + planes[plane].m.offset = 207 + vb->planes[plane].m.offset; 208 + planes[plane].length = 209 + vb->planes[plane].length; 210 + } 211 + break; 212 + } 213 + 214 + /* Fill in driver-provided information for OUTPUT types */ 215 + if (V4L2_TYPE_IS_OUTPUT(b->type)) { 216 + /* 217 + * Will have to go up to b->length when API starts 218 + * accepting variable number of planes. 219 + * 220 + * If bytesused == 0 for the output buffer, then fall 221 + * back to the full buffer size. In that case 222 + * userspace clearly never bothered to set it and 223 + * it's a safe assumption that they really meant to 224 + * use the full plane sizes. 225 + * 226 + * Some drivers, e.g. old codec drivers, use bytesused == 0 227 + * as a way to indicate that streaming is finished. 
228 + * In that case, the driver should use the 229 + * allow_zero_bytesused flag to keep old userspace 230 + * applications working. 231 + */ 232 + for (plane = 0; plane < vb->num_planes; ++plane) { 233 + struct vb2_plane *pdst = &planes[plane]; 234 + struct v4l2_plane *psrc = &b->m.planes[plane]; 235 + 236 + if (psrc->bytesused == 0) 237 + vb2_warn_zero_bytesused(vb); 238 + 239 + if (vb->vb2_queue->allow_zero_bytesused) 240 + pdst->bytesused = psrc->bytesused; 241 + else 242 + pdst->bytesused = psrc->bytesused ? 243 + psrc->bytesused : pdst->length; 244 + pdst->data_offset = psrc->data_offset; 245 + } 246 + } 247 + } else { 248 + /* 249 + * Single-planar buffers do not use planes array, 250 + * so fill in relevant v4l2_buffer struct fields instead. 251 + * In videobuf we use our internal V4l2_planes struct for 252 + * single-planar buffers as well, for simplicity. 253 + * 254 + * If bytesused == 0 for the output buffer, then fall back 255 + * to the full buffer size as that's a sensible default. 256 + * 257 + * Some drivers, e.g. old codec drivers, use bytesused == 0 as 258 + * a way to indicate that streaming is finished. In that case, 259 + * the driver should use the allow_zero_bytesused flag to keep 260 + * old userspace applications working. 261 + */ 262 + switch (b->memory) { 263 + case VB2_MEMORY_USERPTR: 264 + planes[0].m.userptr = b->m.userptr; 265 + planes[0].length = b->length; 266 + break; 267 + case VB2_MEMORY_DMABUF: 268 + planes[0].m.fd = b->m.fd; 269 + planes[0].length = b->length; 270 + break; 271 + default: 272 + planes[0].m.offset = vb->planes[0].m.offset; 273 + planes[0].length = vb->planes[0].length; 274 + break; 275 + } 276 + 277 + planes[0].data_offset = 0; 278 + if (V4L2_TYPE_IS_OUTPUT(b->type)) { 279 + if (b->bytesused == 0) 280 + vb2_warn_zero_bytesused(vb); 281 + 282 + if (vb->vb2_queue->allow_zero_bytesused) 283 + planes[0].bytesused = b->bytesused; 284 + else 285 + planes[0].bytesused = b->bytesused ? 
286 + b->bytesused : planes[0].length; 287 + } else 288 + planes[0].bytesused = 0; 289 + 290 + } 291 + 292 + /* Zero flags that we handle */ 293 + vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; 294 + if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) { 295 + /* 296 + * Non-COPY timestamps and non-OUTPUT queues will get 297 + * their timestamp and timestamp source flags from the 298 + * queue. 299 + */ 300 + vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 301 + } 302 + 303 + if (V4L2_TYPE_IS_OUTPUT(b->type)) { 304 + /* 305 + * For output buffers mask out the timecode flag: 306 + * this will be handled later in vb2_qbuf(). 307 + * The 'field' is valid metadata for this output buffer 308 + * and so that needs to be copied here. 309 + */ 310 + vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE; 311 + vbuf->field = b->field; 312 + } else { 313 + /* Zero any output buffer flags as this is a capture buffer */ 314 + vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS; 315 + /* Zero last flag, this is a signal from driver to userspace */ 316 + vbuf->flags &= ~V4L2_BUF_FLAG_LAST; 317 + } 318 + 319 + return 0; 320 + } 321 + 322 + static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev, 323 + struct v4l2_buffer *b, 324 + const char *opname, 325 + struct media_request **p_req) 326 + { 327 + struct media_request *req; 328 + struct vb2_v4l2_buffer *vbuf; 329 + struct vb2_buffer *vb; 330 + int ret; 331 + 173 332 if (b->type != q->type) { 174 333 dprintk(1, "%s: invalid buffer type\n", opname); 175 334 return -EINVAL; ··· 363 178 return -EINVAL; 364 179 } 365 180 366 - return __verify_planes_array(q->bufs[b->index], b); 181 + vb = q->bufs[b->index]; 182 + vbuf = to_vb2_v4l2_buffer(vb); 183 + ret = __verify_planes_array(vb, b); 184 + if (ret) 185 + return ret; 186 + 187 + if (!vb->prepared) { 188 + /* Copy relevant information provided by the userspace */ 189 + memset(vbuf->planes, 0, 190 + sizeof(vbuf->planes[0]) * vb->num_planes); 191 + ret = 
vb2_fill_vb2_v4l2_buffer(vb, b); 192 + if (ret) 193 + return ret; 194 + } 195 + 196 + if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) { 197 + if (q->uses_requests) { 198 + dprintk(1, "%s: queue uses requests\n", opname); 199 + return -EBUSY; 200 + } 201 + return 0; 202 + } else if (!q->supports_requests) { 203 + dprintk(1, "%s: queue does not support requests\n", opname); 204 + return -EACCES; 205 + } else if (q->uses_qbuf) { 206 + dprintk(1, "%s: queue does not use requests\n", opname); 207 + return -EBUSY; 208 + } 209 + 210 + /* 211 + * For proper locking when queueing a request you need to be able 212 + * to lock access to the vb2 queue, so check that there is a lock 213 + * that we can use. In addition p_req must be non-NULL. 214 + */ 215 + if (WARN_ON(!q->lock || !p_req)) 216 + return -EINVAL; 217 + 218 + /* 219 + * Make sure this op is implemented by the driver. It's easy to forget 220 + * this callback, but is it important when canceling a buffer in a 221 + * queued request. 222 + */ 223 + if (WARN_ON(!q->ops->buf_request_complete)) 224 + return -EINVAL; 225 + 226 + if (vb->state != VB2_BUF_STATE_DEQUEUED) { 227 + dprintk(1, "%s: buffer is not in dequeued state\n", opname); 228 + return -EINVAL; 229 + } 230 + 231 + if (b->request_fd < 0) { 232 + dprintk(1, "%s: request_fd < 0\n", opname); 233 + return -EINVAL; 234 + } 235 + 236 + req = media_request_get_by_fd(mdev, b->request_fd); 237 + if (IS_ERR(req)) { 238 + dprintk(1, "%s: invalid request_fd\n", opname); 239 + return PTR_ERR(req); 240 + } 241 + 242 + /* 243 + * Early sanity check. This is checked again when the buffer 244 + * is bound to the request in vb2_core_qbuf(). 
245 + */ 246 + if (req->state != MEDIA_REQUEST_STATE_IDLE && 247 + req->state != MEDIA_REQUEST_STATE_UPDATING) { 248 + dprintk(1, "%s: request is not idle\n", opname); 249 + media_request_put(req); 250 + return -EBUSY; 251 + } 252 + 253 + *p_req = req; 254 + vbuf->request_fd = b->request_fd; 255 + 256 + return 0; 367 257 } 368 258 369 259 /* ··· 464 204 b->timecode = vbuf->timecode; 465 205 b->sequence = vbuf->sequence; 466 206 b->reserved2 = 0; 467 - b->reserved = 0; 207 + b->request_fd = 0; 468 208 469 209 if (q->is_multiplanar) { 470 210 /* ··· 521 261 case VB2_BUF_STATE_ACTIVE: 522 262 b->flags |= V4L2_BUF_FLAG_QUEUED; 523 263 break; 264 + case VB2_BUF_STATE_IN_REQUEST: 265 + b->flags |= V4L2_BUF_FLAG_IN_REQUEST; 266 + break; 524 267 case VB2_BUF_STATE_ERROR: 525 268 b->flags |= V4L2_BUF_FLAG_ERROR; 526 269 /* fall through */ 527 270 case VB2_BUF_STATE_DONE: 528 271 b->flags |= V4L2_BUF_FLAG_DONE; 529 - break; 530 - case VB2_BUF_STATE_PREPARED: 531 - b->flags |= V4L2_BUF_FLAG_PREPARED; 532 272 break; 533 273 case VB2_BUF_STATE_PREPARING: 534 274 case VB2_BUF_STATE_DEQUEUED: ··· 537 277 break; 538 278 } 539 279 280 + if ((vb->state == VB2_BUF_STATE_DEQUEUED || 281 + vb->state == VB2_BUF_STATE_IN_REQUEST) && 282 + vb->synced && vb->prepared) 283 + b->flags |= V4L2_BUF_FLAG_PREPARED; 284 + 540 285 if (vb2_buffer_in_use(q, vb)) 541 286 b->flags |= V4L2_BUF_FLAG_MAPPED; 287 + if (vbuf->request_fd >= 0) { 288 + b->flags |= V4L2_BUF_FLAG_REQUEST_FD; 289 + b->request_fd = vbuf->request_fd; 290 + } 542 291 543 292 if (!q->is_output && 544 293 b->flags & V4L2_BUF_FLAG_DONE && ··· 560 291 * v4l2_buffer by the userspace. It also verifies that struct 561 292 * v4l2_buffer has a valid number of planes. 
562 293 */ 563 - static int __fill_vb2_buffer(struct vb2_buffer *vb, 564 - const void *pb, struct vb2_plane *planes) 294 + static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes) 565 295 { 566 - struct vb2_queue *q = vb->vb2_queue; 567 - const struct v4l2_buffer *b = pb; 568 296 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 569 297 unsigned int plane; 570 - int ret; 571 298 572 - ret = __verify_length(vb, b); 573 - if (ret < 0) { 574 - dprintk(1, "plane parameters verification failed: %d\n", ret); 575 - return ret; 576 - } 577 - if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) { 578 - /* 579 - * If the format's field is ALTERNATE, then the buffer's field 580 - * should be either TOP or BOTTOM, not ALTERNATE since that 581 - * makes no sense. The driver has to know whether the 582 - * buffer represents a top or a bottom field in order to 583 - * program any DMA correctly. Using ALTERNATE is wrong, since 584 - * that just says that it is either a top or a bottom field, 585 - * but not which of the two it is. 
586 - */ 587 - dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); 588 - return -EINVAL; 589 - } 590 - vb->timestamp = 0; 591 - vbuf->sequence = 0; 299 + if (!vb->vb2_queue->is_output || !vb->vb2_queue->copy_timestamp) 300 + vb->timestamp = 0; 592 301 593 - if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { 594 - if (b->memory == VB2_MEMORY_USERPTR) { 595 - for (plane = 0; plane < vb->num_planes; ++plane) { 596 - planes[plane].m.userptr = 597 - b->m.planes[plane].m.userptr; 598 - planes[plane].length = 599 - b->m.planes[plane].length; 600 - } 302 + for (plane = 0; plane < vb->num_planes; ++plane) { 303 + if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) { 304 + planes[plane].m = vbuf->planes[plane].m; 305 + planes[plane].length = vbuf->planes[plane].length; 601 306 } 602 - if (b->memory == VB2_MEMORY_DMABUF) { 603 - for (plane = 0; plane < vb->num_planes; ++plane) { 604 - planes[plane].m.fd = 605 - b->m.planes[plane].m.fd; 606 - planes[plane].length = 607 - b->m.planes[plane].length; 608 - } 609 - } 610 - 611 - /* Fill in driver-provided information for OUTPUT types */ 612 - if (V4L2_TYPE_IS_OUTPUT(b->type)) { 613 - /* 614 - * Will have to go up to b->length when API starts 615 - * accepting variable number of planes. 616 - * 617 - * If bytesused == 0 for the output buffer, then fall 618 - * back to the full buffer size. In that case 619 - * userspace clearly never bothered to set it and 620 - * it's a safe assumption that they really meant to 621 - * use the full plane sizes. 622 - * 623 - * Some drivers, e.g. old codec drivers, use bytesused == 0 624 - * as a way to indicate that streaming is finished. 625 - * In that case, the driver should use the 626 - * allow_zero_bytesused flag to keep old userspace 627 - * applications working. 
628 - */ 629 - for (plane = 0; plane < vb->num_planes; ++plane) { 630 - struct vb2_plane *pdst = &planes[plane]; 631 - struct v4l2_plane *psrc = &b->m.planes[plane]; 632 - 633 - if (psrc->bytesused == 0) 634 - vb2_warn_zero_bytesused(vb); 635 - 636 - if (vb->vb2_queue->allow_zero_bytesused) 637 - pdst->bytesused = psrc->bytesused; 638 - else 639 - pdst->bytesused = psrc->bytesused ? 640 - psrc->bytesused : pdst->length; 641 - pdst->data_offset = psrc->data_offset; 642 - } 643 - } 644 - } else { 645 - /* 646 - * Single-planar buffers do not use planes array, 647 - * so fill in relevant v4l2_buffer struct fields instead. 648 - * In videobuf we use our internal V4l2_planes struct for 649 - * single-planar buffers as well, for simplicity. 650 - * 651 - * If bytesused == 0 for the output buffer, then fall back 652 - * to the full buffer size as that's a sensible default. 653 - * 654 - * Some drivers, e.g. old codec drivers, use bytesused == 0 as 655 - * a way to indicate that streaming is finished. In that case, 656 - * the driver should use the allow_zero_bytesused flag to keep 657 - * old userspace applications working. 658 - */ 659 - if (b->memory == VB2_MEMORY_USERPTR) { 660 - planes[0].m.userptr = b->m.userptr; 661 - planes[0].length = b->length; 662 - } 663 - 664 - if (b->memory == VB2_MEMORY_DMABUF) { 665 - planes[0].m.fd = b->m.fd; 666 - planes[0].length = b->length; 667 - } 668 - 669 - if (V4L2_TYPE_IS_OUTPUT(b->type)) { 670 - if (b->bytesused == 0) 671 - vb2_warn_zero_bytesused(vb); 672 - 673 - if (vb->vb2_queue->allow_zero_bytesused) 674 - planes[0].bytesused = b->bytesused; 675 - else 676 - planes[0].bytesused = b->bytesused ? 
677 - b->bytesused : planes[0].length; 678 - } else 679 - planes[0].bytesused = 0; 680 - 307 + planes[plane].bytesused = vbuf->planes[plane].bytesused; 308 + planes[plane].data_offset = vbuf->planes[plane].data_offset; 681 309 } 682 - 683 - /* Zero flags that the vb2 core handles */ 684 - vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; 685 - if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) { 686 - /* 687 - * Non-COPY timestamps and non-OUTPUT queues will get 688 - * their timestamp and timestamp source flags from the 689 - * queue. 690 - */ 691 - vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; 692 - } 693 - 694 - if (V4L2_TYPE_IS_OUTPUT(b->type)) { 695 - /* 696 - * For output buffers mask out the timecode flag: 697 - * this will be handled later in vb2_qbuf(). 698 - * The 'field' is valid metadata for this output buffer 699 - * and so that needs to be copied here. 700 - */ 701 - vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE; 702 - vbuf->field = b->field; 703 - } else { 704 - /* Zero any output buffer flags as this is a capture buffer */ 705 - vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS; 706 - /* Zero last flag, this is a signal from driver to userspace */ 707 - vbuf->flags &= ~V4L2_BUF_FLAG_LAST; 708 - } 709 - 710 310 return 0; 711 311 } 712 312 713 313 static const struct vb2_buf_ops v4l2_buf_ops = { 714 314 .verify_planes_array = __verify_planes_array_core, 315 + .init_buffer = __init_v4l2_vb2_buffer, 715 316 .fill_user_buffer = __fill_v4l2_buffer, 716 317 .fill_vb2_buffer = __fill_vb2_buffer, 717 318 .copy_timestamp = __copy_timestamp, ··· 622 483 } 623 484 EXPORT_SYMBOL(vb2_querybuf); 624 485 486 + static void fill_buf_caps(struct vb2_queue *q, u32 *caps) 487 + { 488 + *caps = 0; 489 + if (q->io_modes & VB2_MMAP) 490 + *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP; 491 + if (q->io_modes & VB2_USERPTR) 492 + *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR; 493 + if (q->io_modes & VB2_DMABUF) 494 + *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF; 495 + if (q->supports_requests) 496 + 
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS; 497 + } 498 + 625 499 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) 626 500 { 627 501 int ret = vb2_verify_memory_type(q, req->memory, req->type); 628 502 503 + fill_buf_caps(q, &req->capabilities); 629 504 return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count); 630 505 } 631 506 EXPORT_SYMBOL_GPL(vb2_reqbufs); 632 507 633 - int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b) 508 + int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, 509 + struct v4l2_buffer *b) 634 510 { 635 511 int ret; 636 512 ··· 654 500 return -EBUSY; 655 501 } 656 502 657 - ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf"); 503 + if (b->flags & V4L2_BUF_FLAG_REQUEST_FD) 504 + return -EINVAL; 505 + 506 + ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL); 658 507 659 508 return ret ? ret : vb2_core_prepare_buf(q, b->index, b); 660 509 } ··· 671 514 int ret = vb2_verify_memory_type(q, create->memory, f->type); 672 515 unsigned i; 673 516 517 + fill_buf_caps(q, &create->capabilities); 674 518 create->index = q->num_buffers; 675 519 if (create->count == 0) 676 520 return ret != -EBUSY ? ret : 0; ··· 718 560 } 719 561 EXPORT_SYMBOL_GPL(vb2_create_bufs); 720 562 721 - int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) 563 + int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, 564 + struct v4l2_buffer *b) 722 565 { 566 + struct media_request *req = NULL; 723 567 int ret; 724 568 725 569 if (vb2_fileio_is_active(q)) { ··· 729 569 return -EBUSY; 730 570 } 731 571 732 - ret = vb2_queue_or_prepare_buf(q, b, "qbuf"); 733 - return ret ? 
ret : vb2_core_qbuf(q, b->index, b); 572 + ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req); 573 + if (ret) 574 + return ret; 575 + ret = vb2_core_qbuf(q, b->index, b, req); 576 + if (req) 577 + media_request_put(req); 578 + return ret; 734 579 } 735 580 EXPORT_SYMBOL_GPL(vb2_qbuf); 736 581 ··· 879 714 struct video_device *vdev = video_devdata(file); 880 715 int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type); 881 716 717 + fill_buf_caps(vdev->queue, &p->capabilities); 882 718 if (res) 883 719 return res; 884 720 if (vb2_queue_is_busy(vdev, file)) ··· 901 735 p->format.type); 902 736 903 737 p->index = vdev->queue->num_buffers; 738 + fill_buf_caps(vdev->queue, &p->capabilities); 904 739 /* 905 740 * If count == 0, then just check if memory and type are valid. 906 741 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0. ··· 927 760 928 761 if (vb2_queue_is_busy(vdev, file)) 929 762 return -EBUSY; 930 - return vb2_prepare_buf(vdev->queue, p); 763 + return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p); 931 764 } 932 765 EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf); 933 766 ··· 946 779 947 780 if (vb2_queue_is_busy(vdev, file)) 948 781 return -EBUSY; 949 - return vb2_qbuf(vdev->queue, p); 782 + return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p); 950 783 } 951 784 EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf); 952 785 ··· 1127 960 mutex_lock(vq->lock); 1128 961 } 1129 962 EXPORT_SYMBOL_GPL(vb2_ops_wait_finish); 963 + 964 + /* 965 + * Note that this function is called during validation time and 966 + * thus the req_queue_mutex is held to ensure no request objects 967 + * can be added or deleted while validating. So there is no need 968 + * to protect the objects list. 
969 + */ 970 + int vb2_request_validate(struct media_request *req) 971 + { 972 + struct media_request_object *obj; 973 + int ret = 0; 974 + 975 + if (!vb2_request_buffer_cnt(req)) 976 + return -ENOENT; 977 + 978 + list_for_each_entry(obj, &req->objects, list) { 979 + if (!obj->ops->prepare) 980 + continue; 981 + 982 + ret = obj->ops->prepare(obj); 983 + if (ret) 984 + break; 985 + } 986 + 987 + if (ret) { 988 + list_for_each_entry_continue_reverse(obj, &req->objects, list) 989 + if (obj->ops->unprepare) 990 + obj->ops->unprepare(obj); 991 + return ret; 992 + } 993 + return 0; 994 + } 995 + EXPORT_SYMBOL_GPL(vb2_request_validate); 996 + 997 + void vb2_request_queue(struct media_request *req) 998 + { 999 + struct media_request_object *obj, *obj_safe; 1000 + 1001 + /* 1002 + * Queue all objects. Note that buffer objects are at the end of the 1003 + * objects list, after all other object types. Once buffer objects 1004 + * are queued, the driver might delete them immediately (if the driver 1005 + * processes the buffer at once), so we have to use 1006 + * list_for_each_entry_safe() to handle the case where the object we 1007 + * queue is deleted. 1008 + */ 1009 + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) 1010 + if (obj->ops->queue) 1011 + obj->ops->queue(obj); 1012 + } 1013 + EXPORT_SYMBOL_GPL(vb2_request_queue); 1130 1014 1131 1015 MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2"); 1132 1016 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+2 -3
drivers/media/dvb-core/dvb_vb2.c
··· 146 146 dprintk(3, "[%s]\n", ctx->name); 147 147 } 148 148 149 - static int _fill_vb2_buffer(struct vb2_buffer *vb, 150 - const void *pb, struct vb2_plane *planes) 149 + static int _fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes) 151 150 { 152 151 struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 153 152 ··· 384 385 { 385 386 int ret; 386 387 387 - ret = vb2_core_qbuf(&ctx->vb_q, b->index, b); 388 + ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL); 388 389 if (ret) { 389 390 dprintk(1, "[%s] index=%d errno=%d\n", ctx->name, 390 391 b->index, ret);
+3 -2
drivers/media/dvb-frontends/rtl2832_sdr.c
··· 1394 1394 case RTL2832_SDR_TUNER_E4000: 1395 1395 v4l2_ctrl_handler_init(&dev->hdl, 9); 1396 1396 if (subdev) 1397 - v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, NULL); 1397 + v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, 1398 + NULL, true); 1398 1399 break; 1399 1400 case RTL2832_SDR_TUNER_R820T: 1400 1401 case RTL2832_SDR_TUNER_R828D: ··· 1424 1423 v4l2_ctrl_handler_init(&dev->hdl, 2); 1425 1424 if (subdev) 1426 1425 v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, 1427 - NULL); 1426 + NULL, true); 1428 1427 break; 1429 1428 default: 1430 1429 v4l2_ctrl_handler_init(&dev->hdl, 0);
+20 -4
drivers/media/media-device.c
··· 30 30 #include <media/media-device.h> 31 31 #include <media/media-devnode.h> 32 32 #include <media/media-entity.h> 33 + #include <media/media-request.h> 33 34 34 35 #ifdef CONFIG_MEDIA_CONTROLLER 35 36 ··· 378 377 return ret; 379 378 } 380 379 380 + static long media_device_request_alloc(struct media_device *mdev, 381 + int *alloc_fd) 382 + { 383 + if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue) 384 + return -ENOTTY; 385 + 386 + return media_request_alloc(mdev, alloc_fd); 387 + } 388 + 381 389 static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd) 382 390 { 383 - /* All media IOCTLs are _IOWR() */ 384 - if (copy_from_user(karg, uarg, _IOC_SIZE(cmd))) 391 + if ((_IOC_DIR(cmd) & _IOC_WRITE) && 392 + copy_from_user(karg, uarg, _IOC_SIZE(cmd))) 385 393 return -EFAULT; 386 394 387 395 return 0; ··· 398 388 399 389 static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd) 400 390 { 401 - /* All media IOCTLs are _IOWR() */ 402 - if (copy_to_user(uarg, karg, _IOC_SIZE(cmd))) 391 + if ((_IOC_DIR(cmd) & _IOC_READ) && 392 + copy_to_user(uarg, karg, _IOC_SIZE(cmd))) 403 393 return -EFAULT; 404 394 405 395 return 0; ··· 435 425 MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX), 436 426 MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX), 437 427 MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX), 428 + MEDIA_IOC(REQUEST_ALLOC, media_device_request_alloc, 0), 438 429 }; 439 430 440 431 static long media_device_ioctl(struct file *filp, unsigned int cmd, ··· 702 691 INIT_LIST_HEAD(&mdev->pads); 703 692 INIT_LIST_HEAD(&mdev->links); 704 693 INIT_LIST_HEAD(&mdev->entity_notify); 694 + 695 + mutex_init(&mdev->req_queue_mutex); 705 696 mutex_init(&mdev->graph_mutex); 706 697 ida_init(&mdev->entity_internal_idx); 698 + 699 + atomic_set(&mdev->request_id, 0); 707 700 708 701 dev_dbg(mdev->dev, "Media device initialized\n"); 709 702 } ··· 719 704 
mdev->entity_internal_idx_max = 0; 720 705 media_graph_walk_cleanup(&mdev->pm_count_walk); 721 706 mutex_destroy(&mdev->graph_mutex); 707 + mutex_destroy(&mdev->req_queue_mutex); 722 708 } 723 709 EXPORT_SYMBOL_GPL(media_device_cleanup); 724 710
+501
drivers/media/media-request.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Media device request objects 4 + * 5 + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 6 + * Copyright (C) 2018 Intel Corporation 7 + * Copyright (C) 2018 Google, Inc. 8 + * 9 + * Author: Hans Verkuil <hans.verkuil@cisco.com> 10 + * Author: Sakari Ailus <sakari.ailus@linux.intel.com> 11 + */ 12 + 13 + #include <linux/anon_inodes.h> 14 + #include <linux/file.h> 15 + #include <linux/refcount.h> 16 + 17 + #include <media/media-device.h> 18 + #include <media/media-request.h> 19 + 20 + static const char * const request_state[] = { 21 + [MEDIA_REQUEST_STATE_IDLE] = "idle", 22 + [MEDIA_REQUEST_STATE_VALIDATING] = "validating", 23 + [MEDIA_REQUEST_STATE_QUEUED] = "queued", 24 + [MEDIA_REQUEST_STATE_COMPLETE] = "complete", 25 + [MEDIA_REQUEST_STATE_CLEANING] = "cleaning", 26 + [MEDIA_REQUEST_STATE_UPDATING] = "updating", 27 + }; 28 + 29 + static const char * 30 + media_request_state_str(enum media_request_state state) 31 + { 32 + BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE); 33 + 34 + if (WARN_ON(state >= ARRAY_SIZE(request_state))) 35 + return "invalid"; 36 + return request_state[state]; 37 + } 38 + 39 + static void media_request_clean(struct media_request *req) 40 + { 41 + struct media_request_object *obj, *obj_safe; 42 + 43 + /* Just a sanity check. No other code path is allowed to change this. 
*/ 44 + WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING); 45 + WARN_ON(req->updating_count); 46 + WARN_ON(req->access_count); 47 + 48 + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { 49 + media_request_object_unbind(obj); 50 + media_request_object_put(obj); 51 + } 52 + 53 + req->updating_count = 0; 54 + req->access_count = 0; 55 + WARN_ON(req->num_incomplete_objects); 56 + req->num_incomplete_objects = 0; 57 + wake_up_interruptible_all(&req->poll_wait); 58 + } 59 + 60 + static void media_request_release(struct kref *kref) 61 + { 62 + struct media_request *req = 63 + container_of(kref, struct media_request, kref); 64 + struct media_device *mdev = req->mdev; 65 + 66 + dev_dbg(mdev->dev, "request: release %s\n", req->debug_str); 67 + 68 + /* No other users, no need for a spinlock */ 69 + req->state = MEDIA_REQUEST_STATE_CLEANING; 70 + 71 + media_request_clean(req); 72 + 73 + if (mdev->ops->req_free) 74 + mdev->ops->req_free(req); 75 + else 76 + kfree(req); 77 + } 78 + 79 + void media_request_put(struct media_request *req) 80 + { 81 + kref_put(&req->kref, media_request_release); 82 + } 83 + EXPORT_SYMBOL_GPL(media_request_put); 84 + 85 + static int media_request_close(struct inode *inode, struct file *filp) 86 + { 87 + struct media_request *req = filp->private_data; 88 + 89 + media_request_put(req); 90 + return 0; 91 + } 92 + 93 + static __poll_t media_request_poll(struct file *filp, 94 + struct poll_table_struct *wait) 95 + { 96 + struct media_request *req = filp->private_data; 97 + unsigned long flags; 98 + __poll_t ret = 0; 99 + 100 + if (!(poll_requested_events(wait) & EPOLLPRI)) 101 + return 0; 102 + 103 + spin_lock_irqsave(&req->lock, flags); 104 + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { 105 + ret = EPOLLPRI; 106 + goto unlock; 107 + } 108 + if (req->state != MEDIA_REQUEST_STATE_QUEUED) { 109 + ret = EPOLLERR; 110 + goto unlock; 111 + } 112 + 113 + poll_wait(filp, &req->poll_wait, wait); 114 + 115 + unlock: 116 + 
spin_unlock_irqrestore(&req->lock, flags); 117 + return ret; 118 + } 119 + 120 + static long media_request_ioctl_queue(struct media_request *req) 121 + { 122 + struct media_device *mdev = req->mdev; 123 + enum media_request_state state; 124 + unsigned long flags; 125 + int ret; 126 + 127 + dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str); 128 + 129 + /* 130 + * Ensure the request that is validated will be the one that gets queued 131 + * next by serialising the queueing process. This mutex is also used 132 + * to serialize with canceling a vb2 queue and with setting values such 133 + * as controls in a request. 134 + */ 135 + mutex_lock(&mdev->req_queue_mutex); 136 + 137 + media_request_get(req); 138 + 139 + spin_lock_irqsave(&req->lock, flags); 140 + if (req->state == MEDIA_REQUEST_STATE_IDLE) 141 + req->state = MEDIA_REQUEST_STATE_VALIDATING; 142 + state = req->state; 143 + spin_unlock_irqrestore(&req->lock, flags); 144 + if (state != MEDIA_REQUEST_STATE_VALIDATING) { 145 + dev_dbg(mdev->dev, 146 + "request: unable to queue %s, request in state %s\n", 147 + req->debug_str, media_request_state_str(state)); 148 + media_request_put(req); 149 + mutex_unlock(&mdev->req_queue_mutex); 150 + return -EBUSY; 151 + } 152 + 153 + ret = mdev->ops->req_validate(req); 154 + 155 + /* 156 + * If the req_validate was successful, then we mark the state as QUEUED 157 + * and call req_queue. The reason we set the state first is that this 158 + * allows req_queue to unbind or complete the queued objects in case 159 + * they are immediately 'consumed'. State changes from QUEUED to another 160 + * state can only happen if either the driver changes the state or if 161 + * the user cancels the vb2 queue. The driver can only change the state 162 + * after each object is queued through the req_queue op (and note that 163 + * that op cannot fail), so setting the state to QUEUED up front is 164 + * safe. 
165 + * 166 + * The other reason for changing the state is if the vb2 queue is 167 + * canceled, and that uses the req_queue_mutex which is still locked 168 + * while req_queue is called, so that's safe as well. 169 + */ 170 + spin_lock_irqsave(&req->lock, flags); 171 + req->state = ret ? MEDIA_REQUEST_STATE_IDLE 172 + : MEDIA_REQUEST_STATE_QUEUED; 173 + spin_unlock_irqrestore(&req->lock, flags); 174 + 175 + if (!ret) 176 + mdev->ops->req_queue(req); 177 + 178 + mutex_unlock(&mdev->req_queue_mutex); 179 + 180 + if (ret) { 181 + dev_dbg(mdev->dev, "request: can't queue %s (%d)\n", 182 + req->debug_str, ret); 183 + media_request_put(req); 184 + } 185 + 186 + return ret; 187 + } 188 + 189 + static long media_request_ioctl_reinit(struct media_request *req) 190 + { 191 + struct media_device *mdev = req->mdev; 192 + unsigned long flags; 193 + 194 + spin_lock_irqsave(&req->lock, flags); 195 + if (req->state != MEDIA_REQUEST_STATE_IDLE && 196 + req->state != MEDIA_REQUEST_STATE_COMPLETE) { 197 + dev_dbg(mdev->dev, 198 + "request: %s not in idle or complete state, cannot reinit\n", 199 + req->debug_str); 200 + spin_unlock_irqrestore(&req->lock, flags); 201 + return -EBUSY; 202 + } 203 + if (req->access_count) { 204 + dev_dbg(mdev->dev, 205 + "request: %s is being accessed, cannot reinit\n", 206 + req->debug_str); 207 + spin_unlock_irqrestore(&req->lock, flags); 208 + return -EBUSY; 209 + } 210 + req->state = MEDIA_REQUEST_STATE_CLEANING; 211 + spin_unlock_irqrestore(&req->lock, flags); 212 + 213 + media_request_clean(req); 214 + 215 + spin_lock_irqsave(&req->lock, flags); 216 + req->state = MEDIA_REQUEST_STATE_IDLE; 217 + spin_unlock_irqrestore(&req->lock, flags); 218 + 219 + return 0; 220 + } 221 + 222 + static long media_request_ioctl(struct file *filp, unsigned int cmd, 223 + unsigned long arg) 224 + { 225 + struct media_request *req = filp->private_data; 226 + 227 + switch (cmd) { 228 + case MEDIA_REQUEST_IOC_QUEUE: 229 + return media_request_ioctl_queue(req); 230 + 
case MEDIA_REQUEST_IOC_REINIT: 231 + return media_request_ioctl_reinit(req); 232 + default: 233 + return -ENOIOCTLCMD; 234 + } 235 + } 236 + 237 + static const struct file_operations request_fops = { 238 + .owner = THIS_MODULE, 239 + .poll = media_request_poll, 240 + .unlocked_ioctl = media_request_ioctl, 241 + .release = media_request_close, 242 + }; 243 + 244 + struct media_request * 245 + media_request_get_by_fd(struct media_device *mdev, int request_fd) 246 + { 247 + struct file *filp; 248 + struct media_request *req; 249 + 250 + if (!mdev || !mdev->ops || 251 + !mdev->ops->req_validate || !mdev->ops->req_queue) 252 + return ERR_PTR(-EACCES); 253 + 254 + filp = fget(request_fd); 255 + if (!filp) 256 + goto err_no_req_fd; 257 + 258 + if (filp->f_op != &request_fops) 259 + goto err_fput; 260 + req = filp->private_data; 261 + if (req->mdev != mdev) 262 + goto err_fput; 263 + 264 + /* 265 + * Note: as long as someone has an open filehandle of the request, 266 + * the request can never be released. The fget() above ensures that 267 + * even if userspace closes the request filehandle, the release() 268 + * fop won't be called, so the media_request_get() always succeeds 269 + * and there is no race condition where the request was released 270 + * before media_request_get() is called. 
271 + */ 272 + media_request_get(req); 273 + fput(filp); 274 + 275 + return req; 276 + 277 + err_fput: 278 + fput(filp); 279 + 280 + err_no_req_fd: 281 + dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd); 282 + return ERR_PTR(-EINVAL); 283 + } 284 + EXPORT_SYMBOL_GPL(media_request_get_by_fd); 285 + 286 + int media_request_alloc(struct media_device *mdev, int *alloc_fd) 287 + { 288 + struct media_request *req; 289 + struct file *filp; 290 + int fd; 291 + int ret; 292 + 293 + /* Either both are NULL or both are non-NULL */ 294 + if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free)) 295 + return -ENOMEM; 296 + 297 + fd = get_unused_fd_flags(O_CLOEXEC); 298 + if (fd < 0) 299 + return fd; 300 + 301 + filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC); 302 + if (IS_ERR(filp)) { 303 + ret = PTR_ERR(filp); 304 + goto err_put_fd; 305 + } 306 + 307 + if (mdev->ops->req_alloc) 308 + req = mdev->ops->req_alloc(mdev); 309 + else 310 + req = kzalloc(sizeof(*req), GFP_KERNEL); 311 + if (!req) { 312 + ret = -ENOMEM; 313 + goto err_fput; 314 + } 315 + 316 + filp->private_data = req; 317 + req->mdev = mdev; 318 + req->state = MEDIA_REQUEST_STATE_IDLE; 319 + req->num_incomplete_objects = 0; 320 + kref_init(&req->kref); 321 + INIT_LIST_HEAD(&req->objects); 322 + spin_lock_init(&req->lock); 323 + init_waitqueue_head(&req->poll_wait); 324 + req->updating_count = 0; 325 + req->access_count = 0; 326 + 327 + *alloc_fd = fd; 328 + 329 + snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d", 330 + atomic_inc_return(&mdev->request_id), fd); 331 + dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str); 332 + 333 + fd_install(fd, filp); 334 + 335 + return 0; 336 + 337 + err_fput: 338 + fput(filp); 339 + 340 + err_put_fd: 341 + put_unused_fd(fd); 342 + 343 + return ret; 344 + } 345 + 346 + static void media_request_object_release(struct kref *kref) 347 + { 348 + struct media_request_object *obj = 349 + container_of(kref, struct media_request_object, 
kref); 350 + struct media_request *req = obj->req; 351 + 352 + if (WARN_ON(req)) 353 + media_request_object_unbind(obj); 354 + obj->ops->release(obj); 355 + } 356 + 357 + struct media_request_object * 358 + media_request_object_find(struct media_request *req, 359 + const struct media_request_object_ops *ops, 360 + void *priv) 361 + { 362 + struct media_request_object *obj; 363 + struct media_request_object *found = NULL; 364 + unsigned long flags; 365 + 366 + if (WARN_ON(!ops || !priv)) 367 + return NULL; 368 + 369 + spin_lock_irqsave(&req->lock, flags); 370 + list_for_each_entry(obj, &req->objects, list) { 371 + if (obj->ops == ops && obj->priv == priv) { 372 + media_request_object_get(obj); 373 + found = obj; 374 + break; 375 + } 376 + } 377 + spin_unlock_irqrestore(&req->lock, flags); 378 + return found; 379 + } 380 + EXPORT_SYMBOL_GPL(media_request_object_find); 381 + 382 + void media_request_object_put(struct media_request_object *obj) 383 + { 384 + kref_put(&obj->kref, media_request_object_release); 385 + } 386 + EXPORT_SYMBOL_GPL(media_request_object_put); 387 + 388 + void media_request_object_init(struct media_request_object *obj) 389 + { 390 + obj->ops = NULL; 391 + obj->req = NULL; 392 + obj->priv = NULL; 393 + obj->completed = false; 394 + INIT_LIST_HEAD(&obj->list); 395 + kref_init(&obj->kref); 396 + } 397 + EXPORT_SYMBOL_GPL(media_request_object_init); 398 + 399 + int media_request_object_bind(struct media_request *req, 400 + const struct media_request_object_ops *ops, 401 + void *priv, bool is_buffer, 402 + struct media_request_object *obj) 403 + { 404 + unsigned long flags; 405 + int ret = -EBUSY; 406 + 407 + if (WARN_ON(!ops->release)) 408 + return -EACCES; 409 + 410 + spin_lock_irqsave(&req->lock, flags); 411 + 412 + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) 413 + goto unlock; 414 + 415 + obj->req = req; 416 + obj->ops = ops; 417 + obj->priv = priv; 418 + 419 + if (is_buffer) 420 + list_add_tail(&obj->list, &req->objects); 421 + else 
422 + list_add(&obj->list, &req->objects); 423 + req->num_incomplete_objects++; 424 + ret = 0; 425 + 426 + unlock: 427 + spin_unlock_irqrestore(&req->lock, flags); 428 + return ret; 429 + } 430 + EXPORT_SYMBOL_GPL(media_request_object_bind); 431 + 432 + void media_request_object_unbind(struct media_request_object *obj) 433 + { 434 + struct media_request *req = obj->req; 435 + unsigned long flags; 436 + bool completed = false; 437 + 438 + if (WARN_ON(!req)) 439 + return; 440 + 441 + spin_lock_irqsave(&req->lock, flags); 442 + list_del(&obj->list); 443 + obj->req = NULL; 444 + 445 + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) 446 + goto unlock; 447 + 448 + if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING)) 449 + goto unlock; 450 + 451 + if (req->state == MEDIA_REQUEST_STATE_CLEANING) { 452 + if (!obj->completed) 453 + req->num_incomplete_objects--; 454 + goto unlock; 455 + } 456 + 457 + if (WARN_ON(!req->num_incomplete_objects)) 458 + goto unlock; 459 + 460 + req->num_incomplete_objects--; 461 + if (req->state == MEDIA_REQUEST_STATE_QUEUED && 462 + !req->num_incomplete_objects) { 463 + req->state = MEDIA_REQUEST_STATE_COMPLETE; 464 + completed = true; 465 + wake_up_interruptible_all(&req->poll_wait); 466 + } 467 + 468 + unlock: 469 + spin_unlock_irqrestore(&req->lock, flags); 470 + if (obj->ops->unbind) 471 + obj->ops->unbind(obj); 472 + if (completed) 473 + media_request_put(req); 474 + } 475 + EXPORT_SYMBOL_GPL(media_request_object_unbind); 476 + 477 + void media_request_object_complete(struct media_request_object *obj) 478 + { 479 + struct media_request *req = obj->req; 480 + unsigned long flags; 481 + bool completed = false; 482 + 483 + spin_lock_irqsave(&req->lock, flags); 484 + if (obj->completed) 485 + goto unlock; 486 + obj->completed = true; 487 + if (WARN_ON(!req->num_incomplete_objects) || 488 + WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) 489 + goto unlock; 490 + 491 + if (!--req->num_incomplete_objects) { 492 + req->state = 
MEDIA_REQUEST_STATE_COMPLETE; 493 + wake_up_interruptible_all(&req->poll_wait); 494 + completed = true; 495 + } 496 + unlock: 497 + spin_unlock_irqrestore(&req->lock, flags); 498 + if (completed) 499 + media_request_put(req); 500 + } 501 + EXPORT_SYMBOL_GPL(media_request_object_complete);
+1 -1
drivers/media/pci/bt8xx/bttv-driver.c
··· 4210 4210 /* register video4linux + input */ 4211 4211 if (!bttv_tvcards[btv->c.type].no_video) { 4212 4212 v4l2_ctrl_add_handler(&btv->radio_ctrl_handler, hdl, 4213 - v4l2_ctrl_radio_filter); 4213 + v4l2_ctrl_radio_filter, false); 4214 4214 if (btv->radio_ctrl_handler.error) { 4215 4215 result = btv->radio_ctrl_handler.error; 4216 4216 goto fail2;
+1 -1
drivers/media/pci/cx23885/cx23885-417.c
··· 1527 1527 dev->cxhdl.priv = dev; 1528 1528 dev->cxhdl.func = cx23885_api_func; 1529 1529 cx2341x_handler_set_50hz(&dev->cxhdl, tsport->height == 576); 1530 - v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL); 1530 + v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL, false); 1531 1531 1532 1532 /* Allocate and initialize V4L video device */ 1533 1533 dev->v4l_device = cx23885_video_dev_alloc(tsport,
+1 -1
drivers/media/pci/cx88/cx88-blackbird.c
··· 1183 1183 err = cx2341x_handler_init(&dev->cxhdl, 36); 1184 1184 if (err) 1185 1185 goto fail_core; 1186 - v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL); 1186 + v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL, false); 1187 1187 1188 1188 /* blackbird stuff */ 1189 1189 pr_info("cx23416 based mpeg encoder (blackbird reference design)\n");
+1 -1
drivers/media/pci/cx88/cx88-video.c
··· 1378 1378 if (vc->id == V4L2_CID_CHROMA_AGC) 1379 1379 core->chroma_agc = vc; 1380 1380 } 1381 - v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL); 1381 + v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL, false); 1382 1382 1383 1383 /* load and configure helper modules */ 1384 1384
+2 -2
drivers/media/pci/saa7134/saa7134-empress.c
··· 265 265 "%s empress (%s)", dev->name, 266 266 saa7134_boards[dev->board].name); 267 267 v4l2_ctrl_handler_init(hdl, 21); 268 - v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter); 268 + v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter, false); 269 269 if (dev->empress_sd) 270 - v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL); 270 + v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL, true); 271 271 if (hdl->error) { 272 272 video_device_release(dev->empress_dev); 273 273 return hdl->error;
+1 -1
drivers/media/pci/saa7134/saa7134-video.c
··· 2137 2137 hdl = &dev->radio_ctrl_handler; 2138 2138 v4l2_ctrl_handler_init(hdl, 2); 2139 2139 v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, 2140 - v4l2_ctrl_radio_filter); 2140 + v4l2_ctrl_radio_filter, false); 2141 2141 if (hdl->error) 2142 2142 return hdl->error; 2143 2143 }
+1 -1
drivers/media/platform/exynos4-is/fimc-capture.c
··· 1424 1424 return 0; 1425 1425 1426 1426 return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler, 1427 - sensor->ctrl_handler, NULL); 1427 + sensor->ctrl_handler, NULL, true); 1428 1428 } 1429 1429 1430 1430 static const struct media_entity_operations fimc_sd_media_ops = {
+2 -2
drivers/media/platform/omap3isp/ispvideo.c
··· 940 940 int ret; 941 941 942 942 mutex_lock(&video->queue_lock); 943 - ret = vb2_qbuf(&vfh->queue, b); 943 + ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b); 944 944 mutex_unlock(&video->queue_lock); 945 945 946 946 return ret; ··· 1028 1028 ctrls.count = 1; 1029 1029 ctrls.controls = &ctrl; 1030 1030 1031 - ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls); 1031 + ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, NULL, &ctrls); 1032 1032 if (ret < 0) { 1033 1033 dev_warn(isp->dev, "no pixel rate control in subdev %s\n", 1034 1034 pipe->external->name);
+1 -1
drivers/media/platform/rcar-vin/rcar-core.c
··· 475 475 return ret; 476 476 477 477 ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler, 478 - NULL); 478 + NULL, true); 479 479 if (ret < 0) { 480 480 v4l2_ctrl_handler_free(&vin->ctrl_handler); 481 481 return ret;
+1 -1
drivers/media/platform/rcar_drif.c
··· 1164 1164 } 1165 1165 1166 1166 ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl, 1167 - sdr->ep.subdev->ctrl_handler, NULL); 1167 + sdr->ep.subdev->ctrl_handler, NULL, true); 1168 1168 if (ret) { 1169 1169 rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret); 1170 1170 goto error;
+2 -2
drivers/media/platform/s3c-camif/camif-capture.c
··· 943 943 if (vp->owner && vp->owner != priv) 944 944 return -EBUSY; 945 945 946 - return vb2_qbuf(&vp->vb_queue, buf); 946 + return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf); 947 947 } 948 948 949 949 static int s3c_camif_dqbuf(struct file *file, void *priv, ··· 981 981 struct v4l2_buffer *b) 982 982 { 983 983 struct camif_vp *vp = video_drvdata(file); 984 - return vb2_prepare_buf(&vp->vb_queue, b); 984 + return vb2_prepare_buf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, b); 985 985 } 986 986 987 987 static int s3c_camif_g_selection(struct file *file, void *priv,
+2 -2
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
··· 632 632 return -EIO; 633 633 } 634 634 if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) 635 - return vb2_qbuf(&ctx->vq_src, buf); 635 + return vb2_qbuf(&ctx->vq_src, NULL, buf); 636 636 else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) 637 - return vb2_qbuf(&ctx->vq_dst, buf); 637 + return vb2_qbuf(&ctx->vq_dst, NULL, buf); 638 638 return -EINVAL; 639 639 } 640 640
+2 -2
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
··· 1621 1621 mfc_err("Call on QBUF after EOS command\n"); 1622 1622 return -EIO; 1623 1623 } 1624 - return vb2_qbuf(&ctx->vq_src, buf); 1624 + return vb2_qbuf(&ctx->vq_src, NULL, buf); 1625 1625 } else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { 1626 - return vb2_qbuf(&ctx->vq_dst, buf); 1626 + return vb2_qbuf(&ctx->vq_dst, NULL, buf); 1627 1627 } 1628 1628 return -EINVAL; 1629 1629 }
+4 -3
drivers/media/platform/soc_camera/soc_camera.c
··· 394 394 if (icd->streamer != file) 395 395 return -EBUSY; 396 396 397 - return vb2_qbuf(&icd->vb2_vidq, p); 397 + return vb2_qbuf(&icd->vb2_vidq, NULL, p); 398 398 } 399 399 400 400 static int soc_camera_dqbuf(struct file *file, void *priv, ··· 430 430 { 431 431 struct soc_camera_device *icd = file->private_data; 432 432 433 - return vb2_prepare_buf(&icd->vb2_vidq, b); 433 + return vb2_prepare_buf(&icd->vb2_vidq, NULL, b); 434 434 } 435 435 436 436 static int soc_camera_expbuf(struct file *file, void *priv, ··· 1181 1181 1182 1182 v4l2_subdev_call(sd, video, g_tvnorms, &icd->vdev->tvnorms); 1183 1183 1184 - ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL); 1184 + ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, 1185 + NULL, true); 1185 1186 if (ret < 0) 1186 1187 return ret; 1187 1188
+35 -15
drivers/media/platform/vim2m.c
··· 3 3 * 4 4 * This is a virtual device driver for testing mem-to-mem videobuf framework. 5 5 * It simulates a device that uses memory buffers for both source and 6 - * destination, processes the data and issues an "irq" (simulated by a timer). 6 + * destination, processes the data and issues an "irq" (simulated by a delayed 7 + * workqueue). 7 8 * The device is capable of multi-instance, multi-buffer-per-transaction 8 9 * operation (via the mem2mem framework). 9 10 * ··· 20 19 #include <linux/module.h> 21 20 #include <linux/delay.h> 22 21 #include <linux/fs.h> 23 - #include <linux/timer.h> 24 22 #include <linux/sched.h> 25 23 #include <linux/slab.h> 26 24 ··· 148 148 struct mutex dev_mutex; 149 149 spinlock_t irqlock; 150 150 151 - struct timer_list timer; 151 + struct delayed_work work_run; 152 152 153 153 struct v4l2_m2m_dev *m2m_dev; 154 154 }; ··· 336 336 return 0; 337 337 } 338 338 339 - static void schedule_irq(struct vim2m_dev *dev, int msec_timeout) 340 - { 341 - dprintk(dev, "Scheduling a simulated irq\n"); 342 - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout)); 343 - } 344 - 345 339 /* 346 340 * mem2mem callbacks 347 341 */ ··· 379 385 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 380 386 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 381 387 388 + /* Apply request controls if any */ 389 + v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req, 390 + &ctx->hdl); 391 + 382 392 device_process(ctx, src_buf, dst_buf); 383 393 384 - /* Run a timer, which simulates a hardware irq */ 385 - schedule_irq(dev, ctx->transtime); 394 + /* Complete request controls if any */ 395 + v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req, 396 + &ctx->hdl); 397 + 398 + /* Run delayed work, which simulates a hardware irq */ 399 + schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime)); 386 400 } 387 401 388 - static void device_isr(struct timer_list *t) 402 + static void device_work(struct work_struct *w) 389 403 { 390 - struct 
vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer); 404 + struct vim2m_dev *vim2m_dev = 405 + container_of(w, struct vim2m_dev, work_run.work); 391 406 struct vim2m_ctx *curr_ctx; 392 407 struct vb2_v4l2_buffer *src_vb, *dst_vb; 393 408 unsigned long flags; ··· 808 805 struct vb2_v4l2_buffer *vbuf; 809 806 unsigned long flags; 810 807 808 + flush_scheduled_work(); 811 809 for (;;) { 812 810 if (V4L2_TYPE_IS_OUTPUT(q->type)) 813 811 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); ··· 816 812 vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); 817 813 if (vbuf == NULL) 818 814 return; 815 + v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, 816 + &ctx->hdl); 819 817 spin_lock_irqsave(&ctx->dev->irqlock, flags); 820 818 v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); 821 819 spin_unlock_irqrestore(&ctx->dev->irqlock, flags); 822 820 } 821 + } 822 + 823 + static void vim2m_buf_request_complete(struct vb2_buffer *vb) 824 + { 825 + struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 826 + 827 + v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl); 823 828 } 824 829 825 830 static const struct vb2_ops vim2m_qops = { ··· 839 826 .stop_streaming = vim2m_stop_streaming, 840 827 .wait_prepare = vb2_ops_wait_prepare, 841 828 .wait_finish = vb2_ops_wait_finish, 829 + .buf_request_complete = vim2m_buf_request_complete, 842 830 }; 843 831 844 832 static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) ··· 855 841 src_vq->mem_ops = &vb2_vmalloc_memops; 856 842 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; 857 843 src_vq->lock = &ctx->dev->dev_mutex; 844 + src_vq->supports_requests = true; 858 845 859 846 ret = vb2_queue_init(src_vq); 860 847 if (ret) ··· 1007 992 .job_abort = job_abort, 1008 993 }; 1009 994 995 + static const struct media_device_ops m2m_media_ops = { 996 + .req_validate = vb2_request_validate, 997 + .req_queue = vb2_m2m_request_queue, 998 + }; 999 + 1010 1000 static int vim2m_probe(struct platform_device *pdev) 
1011 1001 { 1012 1002 struct vim2m_dev *dev; ··· 1035 1015 vfd = &dev->vfd; 1036 1016 vfd->lock = &dev->dev_mutex; 1037 1017 vfd->v4l2_dev = &dev->v4l2_dev; 1018 + INIT_DELAYED_WORK(&dev->work_run, device_work); 1038 1019 1039 1020 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); 1040 1021 if (ret) { ··· 1047 1026 v4l2_info(&dev->v4l2_dev, 1048 1027 "Device registered as /dev/video%d\n", vfd->num); 1049 1028 1050 - timer_setup(&dev->timer, device_isr, 0); 1051 1029 platform_set_drvdata(pdev, dev); 1052 1030 1053 1031 dev->m2m_dev = v4l2_m2m_init(&m2m_ops); ··· 1060 1040 dev->mdev.dev = &pdev->dev; 1061 1041 strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model)); 1062 1042 media_device_init(&dev->mdev); 1043 + dev->mdev.ops = &m2m_media_ops; 1063 1044 dev->v4l2_dev.mdev = &dev->mdev; 1064 1045 1065 1046 ret = v4l2_m2m_register_media_controller(dev->m2m_dev, ··· 1104 1083 media_device_cleanup(&dev->mdev); 1105 1084 #endif 1106 1085 v4l2_m2m_release(dev->m2m_dev); 1107 - del_timer_sync(&dev->timer); 1108 1086 video_unregister_device(&dev->vfd); 1109 1087 v4l2_device_unregister(&dev->v4l2_dev); 1110 1088
+74
drivers/media/platform/vivid/vivid-core.c
··· 627 627 kfree(dev); 628 628 } 629 629 630 + #ifdef CONFIG_MEDIA_CONTROLLER 631 + static const struct media_device_ops vivid_media_ops = { 632 + .req_validate = vb2_request_validate, 633 + .req_queue = vb2_request_queue, 634 + }; 635 + #endif 636 + 630 637 static int vivid_create_instance(struct platform_device *pdev, int inst) 631 638 { 632 639 static const struct v4l2_dv_timings def_dv_timings = ··· 663 656 return -ENOMEM; 664 657 665 658 dev->inst = inst; 659 + 660 + #ifdef CONFIG_MEDIA_CONTROLLER 661 + dev->v4l2_dev.mdev = &dev->mdev; 662 + 663 + /* Initialize media device */ 664 + strlcpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model)); 665 + dev->mdev.dev = &pdev->dev; 666 + media_device_init(&dev->mdev); 667 + dev->mdev.ops = &vivid_media_ops; 668 + #endif 666 669 667 670 /* register v4l2_device */ 668 671 snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), ··· 1077 1060 q->min_buffers_needed = 2; 1078 1061 q->lock = &dev->mutex; 1079 1062 q->dev = dev->v4l2_dev.dev; 1063 + q->supports_requests = true; 1080 1064 1081 1065 ret = vb2_queue_init(q); 1082 1066 if (ret) ··· 1098 1080 q->min_buffers_needed = 2; 1099 1081 q->lock = &dev->mutex; 1100 1082 q->dev = dev->v4l2_dev.dev; 1083 + q->supports_requests = true; 1101 1084 1102 1085 ret = vb2_queue_init(q); 1103 1086 if (ret) ··· 1119 1100 q->min_buffers_needed = 2; 1120 1101 q->lock = &dev->mutex; 1121 1102 q->dev = dev->v4l2_dev.dev; 1103 + q->supports_requests = true; 1122 1104 1123 1105 ret = vb2_queue_init(q); 1124 1106 if (ret) ··· 1140 1120 q->min_buffers_needed = 2; 1141 1121 q->lock = &dev->mutex; 1142 1122 q->dev = dev->v4l2_dev.dev; 1123 + q->supports_requests = true; 1143 1124 1144 1125 ret = vb2_queue_init(q); 1145 1126 if (ret) ··· 1160 1139 q->min_buffers_needed = 8; 1161 1140 q->lock = &dev->mutex; 1162 1141 q->dev = dev->v4l2_dev.dev; 1142 + q->supports_requests = true; 1163 1143 1164 1144 ret = vb2_queue_init(q); 1165 1145 if (ret) ··· 1195 1173 */ 1196 1174 vfd->lock = 
&dev->mutex; 1197 1175 video_set_drvdata(vfd, dev); 1176 + 1177 + #ifdef CONFIG_MEDIA_CONTROLLER 1178 + dev->vid_cap_pad.flags = MEDIA_PAD_FL_SINK; 1179 + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_cap_pad); 1180 + if (ret) 1181 + goto unreg_dev; 1182 + #endif 1198 1183 1199 1184 #ifdef CONFIG_VIDEO_VIVID_CEC 1200 1185 if (in_type_counter[HDMI]) { ··· 1255 1226 vfd->lock = &dev->mutex; 1256 1227 video_set_drvdata(vfd, dev); 1257 1228 1229 + #ifdef CONFIG_MEDIA_CONTROLLER 1230 + dev->vid_out_pad.flags = MEDIA_PAD_FL_SOURCE; 1231 + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_out_pad); 1232 + if (ret) 1233 + goto unreg_dev; 1234 + #endif 1235 + 1258 1236 #ifdef CONFIG_VIDEO_VIVID_CEC 1259 1237 for (i = 0; i < dev->num_outputs; i++) { 1260 1238 struct cec_adapter *adap; ··· 1311 1275 vfd->tvnorms = tvnorms_cap; 1312 1276 video_set_drvdata(vfd, dev); 1313 1277 1278 + #ifdef CONFIG_MEDIA_CONTROLLER 1279 + dev->vbi_cap_pad.flags = MEDIA_PAD_FL_SINK; 1280 + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_cap_pad); 1281 + if (ret) 1282 + goto unreg_dev; 1283 + #endif 1284 + 1314 1285 ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]); 1315 1286 if (ret < 0) 1316 1287 goto unreg_dev; ··· 1343 1300 vfd->tvnorms = tvnorms_out; 1344 1301 video_set_drvdata(vfd, dev); 1345 1302 1303 + #ifdef CONFIG_MEDIA_CONTROLLER 1304 + dev->vbi_out_pad.flags = MEDIA_PAD_FL_SOURCE; 1305 + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_out_pad); 1306 + if (ret) 1307 + goto unreg_dev; 1308 + #endif 1309 + 1346 1310 ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]); 1347 1311 if (ret < 0) 1348 1312 goto unreg_dev; ··· 1372 1322 vfd->queue = &dev->vb_sdr_cap_q; 1373 1323 vfd->lock = &dev->mutex; 1374 1324 video_set_drvdata(vfd, dev); 1325 + 1326 + #ifdef CONFIG_MEDIA_CONTROLLER 1327 + dev->sdr_cap_pad.flags = MEDIA_PAD_FL_SINK; 1328 + ret = media_entity_pads_init(&vfd->entity, 1, &dev->sdr_cap_pad); 1329 + if (ret) 1330 + goto 
unreg_dev; 1331 + #endif 1375 1332 1376 1333 ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]); 1377 1334 if (ret < 0) ··· 1426 1369 video_device_node_name(vfd)); 1427 1370 } 1428 1371 1372 + #ifdef CONFIG_MEDIA_CONTROLLER 1373 + /* Register the media device */ 1374 + ret = media_device_register(&dev->mdev); 1375 + if (ret) { 1376 + dev_err(dev->mdev.dev, 1377 + "media device register failed (err=%d)\n", ret); 1378 + goto unreg_dev; 1379 + } 1380 + #endif 1381 + 1429 1382 /* Now that everything is fine, let's add it to device list */ 1430 1383 vivid_devs[inst] = dev; 1431 1384 1432 1385 return 0; 1433 1386 1434 1387 unreg_dev: 1388 + #ifdef CONFIG_MEDIA_CONTROLLER 1389 + media_device_unregister(&dev->mdev); 1390 + #endif 1435 1391 video_unregister_device(&dev->radio_tx_dev); 1436 1392 video_unregister_device(&dev->radio_rx_dev); 1437 1393 video_unregister_device(&dev->sdr_cap_dev); ··· 1514 1444 dev = vivid_devs[i]; 1515 1445 if (!dev) 1516 1446 continue; 1447 + 1448 + #ifdef CONFIG_MEDIA_CONTROLLER 1449 + media_device_unregister(&dev->mdev); 1450 + #endif 1517 1451 1518 1452 if (dev->has_vid_cap) { 1519 1453 v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+8
drivers/media/platform/vivid/vivid-core.h
··· 136 136 struct vivid_dev { 137 137 unsigned inst; 138 138 struct v4l2_device v4l2_dev; 139 + #ifdef CONFIG_MEDIA_CONTROLLER 140 + struct media_device mdev; 141 + struct media_pad vid_cap_pad; 142 + struct media_pad vid_out_pad; 143 + struct media_pad vbi_cap_pad; 144 + struct media_pad vbi_out_pad; 145 + struct media_pad sdr_cap_pad; 146 + #endif 139 147 struct v4l2_ctrl_handler ctrl_hdl_user_gen; 140 148 struct v4l2_ctrl_handler ctrl_hdl_user_vid; 141 149 struct v4l2_ctrl_handler ctrl_hdl_user_aud;
+23 -23
drivers/media/platform/vivid/vivid-ctrls.c
··· 1662 1662 v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true); 1663 1663 1664 1664 if (dev->has_vid_cap) { 1665 - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL); 1666 - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL); 1667 - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL); 1668 - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL); 1669 - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL); 1670 - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL); 1671 - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL); 1665 + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL, false); 1666 + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL, false); 1667 + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL, false); 1668 + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL, false); 1669 + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL, false); 1670 + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL, false); 1671 + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL, false); 1672 1672 if (hdl_vid_cap->error) 1673 1673 return hdl_vid_cap->error; 1674 1674 dev->vid_cap_dev.ctrl_handler = hdl_vid_cap; 1675 1675 } 1676 1676 if (dev->has_vid_out) { 1677 - v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL); 1678 - v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL); 1679 - v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL); 1680 - v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL); 1677 + v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL, false); 1678 + v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL, false); 1679 + v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL, false); 1680 + v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL, false); 1681 1681 if (hdl_vid_out->error) 1682 1682 return hdl_vid_out->error; 1683 1683 dev->vid_out_dev.ctrl_handler = hdl_vid_out; 1684 1684 } 1685 1685 if (dev->has_vbi_cap) { 1686 - v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL); 1687 - 
v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL); 1688 - v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL); 1689 - v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL); 1686 + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL, false); 1687 + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL, false); 1688 + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL, false); 1689 + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL, false); 1690 1690 if (hdl_vbi_cap->error) 1691 1691 return hdl_vbi_cap->error; 1692 1692 dev->vbi_cap_dev.ctrl_handler = hdl_vbi_cap; 1693 1693 } 1694 1694 if (dev->has_vbi_out) { 1695 - v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL); 1696 - v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL); 1695 + v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL, false); 1696 + v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL, false); 1697 1697 if (hdl_vbi_out->error) 1698 1698 return hdl_vbi_out->error; 1699 1699 dev->vbi_out_dev.ctrl_handler = hdl_vbi_out; 1700 1700 } 1701 1701 if (dev->has_radio_rx) { 1702 - v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL); 1703 - v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL); 1702 + v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL, false); 1703 + v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL, false); 1704 1704 if (hdl_radio_rx->error) 1705 1705 return hdl_radio_rx->error; 1706 1706 dev->radio_rx_dev.ctrl_handler = hdl_radio_rx; 1707 1707 } 1708 1708 if (dev->has_radio_tx) { 1709 - v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL); 1710 - v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL); 1709 + v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL, false); 1710 + v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL, false); 1711 1711 if (hdl_radio_tx->error) 1712 1712 return hdl_radio_tx->error; 1713 1713 dev->radio_tx_dev.ctrl_handler = hdl_radio_tx; 1714 1714 } 1715 1715 if (dev->has_sdr_cap) { 1716 - 
v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL); 1717 - v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL); 1716 + v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL, false); 1717 + v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL, false); 1718 1718 if (hdl_sdr_cap->error) 1719 1719 return hdl_sdr_cap->error; 1720 1720 dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
+12
drivers/media/platform/vivid/vivid-kthread-cap.c
··· 703 703 goto update_mv; 704 704 705 705 if (vid_cap_buf) { 706 + v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req, 707 + &dev->ctrl_hdl_vid_cap); 706 708 /* Fill buffer */ 707 709 vivid_fillbuff(dev, vid_cap_buf); 708 710 dprintk(dev, 1, "filled buffer %d\n", ··· 715 713 dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc) 716 714 vivid_overlay(dev, vid_cap_buf); 717 715 716 + v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req, 717 + &dev->ctrl_hdl_vid_cap); 718 718 vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ? 719 719 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 720 720 dprintk(dev, 2, "vid_cap buffer %d done\n", ··· 724 720 } 725 721 726 722 if (vbi_cap_buf) { 723 + v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req, 724 + &dev->ctrl_hdl_vbi_cap); 727 725 if (dev->stream_sliced_vbi_cap) 728 726 vivid_sliced_vbi_cap_process(dev, vbi_cap_buf); 729 727 else 730 728 vivid_raw_vbi_cap_process(dev, vbi_cap_buf); 729 + v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req, 730 + &dev->ctrl_hdl_vbi_cap); 731 731 vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ? 732 732 VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); 733 733 dprintk(dev, 2, "vbi_cap %d done\n", ··· 899 891 buf = list_entry(dev->vid_cap_active.next, 900 892 struct vivid_buffer, list); 901 893 list_del(&buf->list); 894 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 895 + &dev->ctrl_hdl_vid_cap); 902 896 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); 903 897 dprintk(dev, 2, "vid_cap buffer %d done\n", 904 898 buf->vb.vb2_buf.index); ··· 914 904 buf = list_entry(dev->vbi_cap_active.next, 915 905 struct vivid_buffer, list); 916 906 list_del(&buf->list); 907 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 908 + &dev->ctrl_hdl_vbi_cap); 917 909 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); 918 910 dprintk(dev, 2, "vbi_cap buffer %d done\n", 919 911 buf->vb.vb2_buf.index);
+12
drivers/media/platform/vivid/vivid-kthread-out.c
··· 75 75 return; 76 76 77 77 if (vid_out_buf) { 78 + v4l2_ctrl_request_setup(vid_out_buf->vb.vb2_buf.req_obj.req, 79 + &dev->ctrl_hdl_vid_out); 80 + v4l2_ctrl_request_complete(vid_out_buf->vb.vb2_buf.req_obj.req, 81 + &dev->ctrl_hdl_vid_out); 78 82 vid_out_buf->vb.sequence = dev->vid_out_seq_count; 79 83 if (dev->field_out == V4L2_FIELD_ALTERNATE) { 80 84 /* ··· 96 92 } 97 93 98 94 if (vbi_out_buf) { 95 + v4l2_ctrl_request_setup(vbi_out_buf->vb.vb2_buf.req_obj.req, 96 + &dev->ctrl_hdl_vbi_out); 97 + v4l2_ctrl_request_complete(vbi_out_buf->vb.vb2_buf.req_obj.req, 98 + &dev->ctrl_hdl_vbi_out); 99 99 if (dev->stream_sliced_vbi_out) 100 100 vivid_sliced_vbi_out_process(dev, vbi_out_buf); 101 101 ··· 270 262 buf = list_entry(dev->vid_out_active.next, 271 263 struct vivid_buffer, list); 272 264 list_del(&buf->list); 265 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 266 + &dev->ctrl_hdl_vid_out); 273 267 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); 274 268 dprintk(dev, 2, "vid_out buffer %d done\n", 275 269 buf->vb.vb2_buf.index); ··· 285 275 buf = list_entry(dev->vbi_out_active.next, 286 276 struct vivid_buffer, list); 287 277 list_del(&buf->list); 278 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 279 + &dev->ctrl_hdl_vbi_out); 288 280 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); 289 281 dprintk(dev, 2, "vbi_out buffer %d done\n", 290 282 buf->vb.vb2_buf.index);
+16
drivers/media/platform/vivid/vivid-sdr-cap.c
··· 102 102 103 103 if (sdr_cap_buf) { 104 104 sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count; 105 + v4l2_ctrl_request_setup(sdr_cap_buf->vb.vb2_buf.req_obj.req, 106 + &dev->ctrl_hdl_sdr_cap); 107 + v4l2_ctrl_request_complete(sdr_cap_buf->vb.vb2_buf.req_obj.req, 108 + &dev->ctrl_hdl_sdr_cap); 105 109 vivid_sdr_cap_process(dev, sdr_cap_buf); 106 110 sdr_cap_buf->vb.vb2_buf.timestamp = 107 111 ktime_get_ns() + dev->time_wrap_offset; ··· 276 272 277 273 list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) { 278 274 list_del(&buf->list); 275 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 276 + &dev->ctrl_hdl_sdr_cap); 279 277 vb2_buffer_done(&buf->vb.vb2_buf, 280 278 VB2_BUF_STATE_QUEUED); 281 279 } ··· 299 293 buf = list_entry(dev->sdr_cap_active.next, 300 294 struct vivid_buffer, list); 301 295 list_del(&buf->list); 296 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 297 + &dev->ctrl_hdl_sdr_cap); 302 298 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); 303 299 } 304 300 ··· 311 303 mutex_lock(&dev->mutex); 312 304 } 313 305 306 + static void sdr_cap_buf_request_complete(struct vb2_buffer *vb) 307 + { 308 + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 309 + 310 + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_sdr_cap); 311 + } 312 + 314 313 const struct vb2_ops vivid_sdr_cap_qops = { 315 314 .queue_setup = sdr_cap_queue_setup, 316 315 .buf_prepare = sdr_cap_buf_prepare, 317 316 .buf_queue = sdr_cap_buf_queue, 318 317 .start_streaming = sdr_cap_start_streaming, 319 318 .stop_streaming = sdr_cap_stop_streaming, 319 + .buf_request_complete = sdr_cap_buf_request_complete, 320 320 .wait_prepare = vb2_ops_wait_prepare, 321 321 .wait_finish = vb2_ops_wait_finish, 322 322 };
+10
drivers/media/platform/vivid/vivid-vbi-cap.c
··· 204 204 205 205 list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) { 206 206 list_del(&buf->list); 207 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 208 + &dev->ctrl_hdl_vbi_cap); 207 209 vb2_buffer_done(&buf->vb.vb2_buf, 208 210 VB2_BUF_STATE_QUEUED); 209 211 } ··· 222 220 vivid_stop_generating_vid_cap(dev, &dev->vbi_cap_streaming); 223 221 } 224 222 223 + static void vbi_cap_buf_request_complete(struct vb2_buffer *vb) 224 + { 225 + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 226 + 227 + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_cap); 228 + } 229 + 225 230 const struct vb2_ops vivid_vbi_cap_qops = { 226 231 .queue_setup = vbi_cap_queue_setup, 227 232 .buf_prepare = vbi_cap_buf_prepare, 228 233 .buf_queue = vbi_cap_buf_queue, 229 234 .start_streaming = vbi_cap_start_streaming, 230 235 .stop_streaming = vbi_cap_stop_streaming, 236 + .buf_request_complete = vbi_cap_buf_request_complete, 231 237 .wait_prepare = vb2_ops_wait_prepare, 232 238 .wait_finish = vb2_ops_wait_finish, 233 239 };
+10
drivers/media/platform/vivid/vivid-vbi-out.c
··· 96 96 97 97 list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) { 98 98 list_del(&buf->list); 99 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 100 + &dev->ctrl_hdl_vbi_out); 99 101 vb2_buffer_done(&buf->vb.vb2_buf, 100 102 VB2_BUF_STATE_QUEUED); 101 103 } ··· 117 115 dev->vbi_out_have_cc[1] = false; 118 116 } 119 117 118 + static void vbi_out_buf_request_complete(struct vb2_buffer *vb) 119 + { 120 + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 121 + 122 + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_out); 123 + } 124 + 120 125 const struct vb2_ops vivid_vbi_out_qops = { 121 126 .queue_setup = vbi_out_queue_setup, 122 127 .buf_prepare = vbi_out_buf_prepare, 123 128 .buf_queue = vbi_out_buf_queue, 124 129 .start_streaming = vbi_out_start_streaming, 125 130 .stop_streaming = vbi_out_stop_streaming, 131 + .buf_request_complete = vbi_out_buf_request_complete, 126 132 .wait_prepare = vb2_ops_wait_prepare, 127 133 .wait_finish = vb2_ops_wait_finish, 128 134 };
+10
drivers/media/platform/vivid/vivid-vid-cap.c
··· 243 243 244 244 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { 245 245 list_del(&buf->list); 246 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 247 + &dev->ctrl_hdl_vid_cap); 246 248 vb2_buffer_done(&buf->vb.vb2_buf, 247 249 VB2_BUF_STATE_QUEUED); 248 250 } ··· 262 260 dev->can_loop_video = false; 263 261 } 264 262 263 + static void vid_cap_buf_request_complete(struct vb2_buffer *vb) 264 + { 265 + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 266 + 267 + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap); 268 + } 269 + 265 270 const struct vb2_ops vivid_vid_cap_qops = { 266 271 .queue_setup = vid_cap_queue_setup, 267 272 .buf_prepare = vid_cap_buf_prepare, ··· 276 267 .buf_queue = vid_cap_buf_queue, 277 268 .start_streaming = vid_cap_start_streaming, 278 269 .stop_streaming = vid_cap_stop_streaming, 270 + .buf_request_complete = vid_cap_buf_request_complete, 279 271 .wait_prepare = vb2_ops_wait_prepare, 280 272 .wait_finish = vb2_ops_wait_finish, 281 273 };
+10
drivers/media/platform/vivid/vivid-vid-out.c
··· 162 162 163 163 list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) { 164 164 list_del(&buf->list); 165 + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, 166 + &dev->ctrl_hdl_vid_out); 165 167 vb2_buffer_done(&buf->vb.vb2_buf, 166 168 VB2_BUF_STATE_QUEUED); 167 169 } ··· 181 179 dev->can_loop_video = false; 182 180 } 183 181 182 + static void vid_out_buf_request_complete(struct vb2_buffer *vb) 183 + { 184 + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); 185 + 186 + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_out); 187 + } 188 + 184 189 const struct vb2_ops vivid_vid_out_qops = { 185 190 .queue_setup = vid_out_queue_setup, 186 191 .buf_prepare = vid_out_buf_prepare, 187 192 .buf_queue = vid_out_buf_queue, 188 193 .start_streaming = vid_out_start_streaming, 189 194 .stop_streaming = vid_out_stop_streaming, 195 + .buf_request_complete = vid_out_buf_request_complete, 190 196 .wait_prepare = vb2_ops_wait_prepare, 191 197 .wait_finish = vb2_ops_wait_finish, 192 198 };
+1 -1
drivers/media/usb/cpia2/cpia2_v4l.c
··· 949 949 buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; 950 950 buf->length = cam->frame_size; 951 951 buf->reserved2 = 0; 952 - buf->reserved = 0; 952 + buf->request_fd = 0; 953 953 memset(&buf->timecode, 0, sizeof(buf->timecode)); 954 954 955 955 DBG("DQBUF #%d status:%d seq:%d length:%d\n", buf->index,
+1 -1
drivers/media/usb/cx231xx/cx231xx-417.c
··· 1992 1992 dev->mpeg_ctrl_handler.ops = &cx231xx_ops; 1993 1993 if (dev->sd_cx25840) 1994 1994 v4l2_ctrl_add_handler(&dev->mpeg_ctrl_handler.hdl, 1995 - dev->sd_cx25840->ctrl_handler, NULL); 1995 + dev->sd_cx25840->ctrl_handler, NULL, false); 1996 1996 if (dev->mpeg_ctrl_handler.hdl.error) { 1997 1997 err = dev->mpeg_ctrl_handler.hdl.error; 1998 1998 dprintk(3, "%s: can't add cx25840 controls\n", dev->name);
+2 -2
drivers/media/usb/cx231xx/cx231xx-video.c
··· 2204 2204 2205 2205 if (dev->sd_cx25840) { 2206 2206 v4l2_ctrl_add_handler(&dev->ctrl_handler, 2207 - dev->sd_cx25840->ctrl_handler, NULL); 2207 + dev->sd_cx25840->ctrl_handler, NULL, true); 2208 2208 v4l2_ctrl_add_handler(&dev->radio_ctrl_handler, 2209 2209 dev->sd_cx25840->ctrl_handler, 2210 - v4l2_ctrl_radio_filter); 2210 + v4l2_ctrl_radio_filter, true); 2211 2211 } 2212 2212 2213 2213 if (dev->ctrl_handler.error)
+1 -1
drivers/media/usb/msi2500/msi2500.c
··· 1278 1278 } 1279 1279 1280 1280 /* currently all controls are from subdev */ 1281 - v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL); 1281 + v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL, true); 1282 1282 1283 1283 dev->v4l2_dev.ctrl_handler = &dev->hdl; 1284 1284 dev->vdev.v4l2_dev = &dev->v4l2_dev;
+1 -1
drivers/media/usb/tm6000/tm6000-video.c
··· 1627 1627 v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops, 1628 1628 V4L2_CID_HUE, -128, 127, 1, 0); 1629 1629 v4l2_ctrl_add_handler(&dev->ctrl_handler, 1630 - &dev->radio_ctrl_handler, NULL); 1630 + &dev->radio_ctrl_handler, NULL, false); 1631 1631 1632 1632 if (dev->radio_ctrl_handler.error) 1633 1633 ret = dev->radio_ctrl_handler.error;
+3 -2
drivers/media/usb/uvc/uvc_queue.c
··· 300 300 return ret; 301 301 } 302 302 303 - int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) 303 + int uvc_queue_buffer(struct uvc_video_queue *queue, 304 + struct media_device *mdev, struct v4l2_buffer *buf) 304 305 { 305 306 int ret; 306 307 307 308 mutex_lock(&queue->mutex); 308 - ret = vb2_qbuf(&queue->queue, buf); 309 + ret = vb2_qbuf(&queue->queue, mdev, buf); 309 310 mutex_unlock(&queue->mutex); 310 311 311 312 return ret;
+2 -1
drivers/media/usb/uvc/uvc_v4l2.c
··· 751 751 if (!uvc_has_privileges(handle)) 752 752 return -EBUSY; 753 753 754 - return uvc_queue_buffer(&stream->queue, buf); 754 + return uvc_queue_buffer(&stream->queue, 755 + stream->vdev.v4l2_dev->mdev, buf); 755 756 } 756 757 757 758 static int uvc_ioctl_expbuf(struct file *file, void *fh,
+1
drivers/media/usb/uvc/uvcvideo.h
··· 700 700 int uvc_create_buffers(struct uvc_video_queue *queue, 701 701 struct v4l2_create_buffers *v4l2_cb); 702 702 int uvc_queue_buffer(struct uvc_video_queue *queue, 703 + struct media_device *mdev, 703 704 struct v4l2_buffer *v4l2_buf); 704 705 int uvc_export_buffer(struct uvc_video_queue *queue, 705 706 struct v4l2_exportbuffer *exp);
+14 -5
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
··· 244 244 * return: number of created buffers 245 245 * @memory: buffer memory type 246 246 * @format: frame format, for which buffers are requested 247 + * @capabilities: capabilities of this buffer type. 247 248 * @reserved: future extensions 248 249 */ 249 250 struct v4l2_create_buffers32 { ··· 252 251 __u32 count; 253 252 __u32 memory; /* enum v4l2_memory */ 254 253 struct v4l2_format32 format; 255 - __u32 reserved[8]; 254 + __u32 capabilities; 255 + __u32 reserved[7]; 256 256 }; 257 257 258 258 static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) ··· 413 411 if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || 414 412 copy_in_user(p32, p64, 415 413 offsetof(struct v4l2_create_buffers32, format)) || 414 + assign_in_user(&p32->capabilities, &p64->capabilities) || 416 415 copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) 417 416 return -EFAULT; 418 417 return __put_v4l2_format32(&p64->format, &p32->format); ··· 485 482 } m; 486 483 __u32 length; 487 484 __u32 reserved2; 488 - __u32 reserved; 485 + __s32 request_fd; 489 486 }; 490 487 491 488 static int get_v4l2_plane32(struct v4l2_plane __user *p64, ··· 584 581 { 585 582 u32 type; 586 583 u32 length; 584 + s32 request_fd; 587 585 enum v4l2_memory memory; 588 586 struct v4l2_plane32 __user *uplane32; 589 587 struct v4l2_plane __user *uplane; ··· 599 595 get_user(memory, &p32->memory) || 600 596 put_user(memory, &p64->memory) || 601 597 get_user(length, &p32->length) || 602 - put_user(length, &p64->length)) 598 + put_user(length, &p64->length) || 599 + get_user(request_fd, &p32->request_fd) || 600 + put_user(request_fd, &p64->request_fd)) 603 601 return -EFAULT; 604 602 605 603 if (V4L2_TYPE_IS_OUTPUT(type)) ··· 705 699 copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) || 706 700 assign_in_user(&p32->sequence, &p64->sequence) || 707 701 assign_in_user(&p32->reserved2, &p64->reserved2) || 708 - assign_in_user(&p32->reserved, &p64->reserved) || 702 + 
assign_in_user(&p32->request_fd, &p64->request_fd) || 709 703 get_user(length, &p64->length) || 710 704 put_user(length, &p32->length)) 711 705 return -EFAULT; ··· 840 834 __u32 which; 841 835 __u32 count; 842 836 __u32 error_idx; 843 - __u32 reserved[2]; 837 + __s32 request_fd; 838 + __u32 reserved[1]; 844 839 compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */ 845 840 }; 846 841 ··· 916 909 get_user(count, &p32->count) || 917 910 put_user(count, &p64->count) || 918 911 assign_in_user(&p64->error_idx, &p32->error_idx) || 912 + assign_in_user(&p64->request_fd, &p32->request_fd) || 919 913 copy_in_user(p64->reserved, p32->reserved, sizeof(p64->reserved))) 920 914 return -EFAULT; 921 915 ··· 982 974 get_user(count, &p64->count) || 983 975 put_user(count, &p32->count) || 984 976 assign_in_user(&p32->error_idx, &p64->error_idx) || 977 + assign_in_user(&p32->request_fd, &p64->request_fd) || 985 978 copy_in_user(p32->reserved, p64->reserved, sizeof(p32->reserved)) || 986 979 get_user(kcontrols, &p64->controls)) 987 980 return -EFAULT;
+585 -27
drivers/media/v4l2-core/v4l2-ctrls.c
··· 37 37 struct v4l2_ctrl_helper { 38 38 /* Pointer to the control reference of the master control */ 39 39 struct v4l2_ctrl_ref *mref; 40 - /* The control corresponding to the v4l2_ext_control ID field. */ 41 - struct v4l2_ctrl *ctrl; 40 + /* The control ref corresponding to the v4l2_ext_control ID field. */ 41 + struct v4l2_ctrl_ref *ref; 42 42 /* v4l2_ext_control index of the next control belonging to the 43 43 same cluster, or 0 if there isn't any. */ 44 44 u32 next; ··· 844 844 case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; 845 845 case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; 846 846 case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; 847 + case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters"; 848 + case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices"; 847 849 848 850 /* VPX controls */ 849 851 case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; ··· 1294 1292 case V4L2_CID_RDS_TX_ALT_FREQS: 1295 1293 *type = V4L2_CTRL_TYPE_U32; 1296 1294 break; 1295 + case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: 1296 + *type = V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS; 1297 + break; 1298 + case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: 1299 + *type = V4L2_CTRL_TYPE_MPEG2_QUANTIZATION; 1300 + break; 1297 1301 default: 1298 1302 *type = V4L2_CTRL_TYPE_INTEGER; 1299 1303 break; ··· 1558 1550 static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, 1559 1551 union v4l2_ctrl_ptr ptr) 1560 1552 { 1553 + struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params; 1561 1554 size_t len; 1562 1555 u64 offset; 1563 1556 s64 val; ··· 1621 1612 return -ERANGE; 1622 1613 return 0; 1623 1614 1615 + case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS: 1616 + p_mpeg2_slice_params = ptr.p; 1617 + 1618 + switch (p_mpeg2_slice_params->sequence.chroma_format) { 1619 + case 1: /* 4:2:0 */ 1620 + case 2: /* 4:2:2 */ 1621 + case 3: /* 4:4:4 */ 1622 + break; 
1623 + default: 1624 + return -EINVAL; 1625 + } 1626 + 1627 + switch (p_mpeg2_slice_params->picture.intra_dc_precision) { 1628 + case 0: /* 8 bits */ 1629 + case 1: /* 9 bits */ 1630 + case 11: /* 11 bits */ 1631 + break; 1632 + default: 1633 + return -EINVAL; 1634 + } 1635 + 1636 + switch (p_mpeg2_slice_params->picture.picture_structure) { 1637 + case 1: /* interlaced top field */ 1638 + case 2: /* interlaced bottom field */ 1639 + case 3: /* progressive */ 1640 + break; 1641 + default: 1642 + return -EINVAL; 1643 + } 1644 + 1645 + switch (p_mpeg2_slice_params->picture.picture_coding_type) { 1646 + case V4L2_MPEG2_PICTURE_CODING_TYPE_I: 1647 + case V4L2_MPEG2_PICTURE_CODING_TYPE_P: 1648 + case V4L2_MPEG2_PICTURE_CODING_TYPE_B: 1649 + break; 1650 + default: 1651 + return -EINVAL; 1652 + } 1653 + 1654 + if (p_mpeg2_slice_params->backward_ref_index >= VIDEO_MAX_FRAME || 1655 + p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME) 1656 + return -EINVAL; 1657 + 1658 + return 0; 1659 + 1660 + case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: 1661 + return 0; 1662 + 1624 1663 default: 1625 1664 return -EINVAL; 1626 1665 } ··· 1723 1666 struct v4l2_ctrl *ctrl) 1724 1667 { 1725 1668 return ptr_to_user(c, ctrl, ctrl->p_new); 1669 + } 1670 + 1671 + /* Helper function: copy the request value back to the caller */ 1672 + static int req_to_user(struct v4l2_ext_control *c, 1673 + struct v4l2_ctrl_ref *ref) 1674 + { 1675 + return ptr_to_user(c, ref->ctrl, ref->p_req); 1726 1676 } 1727 1677 1728 1678 /* Helper function: copy the initial control value back to the caller */ ··· 1851 1787 ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new); 1852 1788 } 1853 1789 1790 + /* Copy the new value to the request value */ 1791 + static void new_to_req(struct v4l2_ctrl_ref *ref) 1792 + { 1793 + if (!ref) 1794 + return; 1795 + ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req); 1796 + ref->req = ref; 1797 + } 1798 + 1799 + /* Copy the request value to the new value */ 1800 + static void req_to_new(struct 
v4l2_ctrl_ref *ref) 1801 + { 1802 + if (!ref) 1803 + return; 1804 + if (ref->req) 1805 + ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new); 1806 + else 1807 + ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new); 1808 + } 1809 + 1854 1810 /* Return non-zero if one or more of the controls in the cluster has a new 1855 1811 value that differs from the current value. */ 1856 1812 static int cluster_changed(struct v4l2_ctrl *master) ··· 1980 1896 lockdep_set_class_and_name(hdl->lock, key, name); 1981 1897 INIT_LIST_HEAD(&hdl->ctrls); 1982 1898 INIT_LIST_HEAD(&hdl->ctrl_refs); 1899 + INIT_LIST_HEAD(&hdl->requests); 1900 + INIT_LIST_HEAD(&hdl->requests_queued); 1901 + hdl->request_is_queued = false; 1983 1902 hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; 1984 1903 hdl->buckets = kvmalloc_array(hdl->nr_of_buckets, 1985 1904 sizeof(hdl->buckets[0]), 1986 1905 GFP_KERNEL | __GFP_ZERO); 1987 1906 hdl->error = hdl->buckets ? 0 : -ENOMEM; 1907 + media_request_object_init(&hdl->req_obj); 1988 1908 return hdl->error; 1989 1909 } 1990 1910 EXPORT_SYMBOL(v4l2_ctrl_handler_init_class); ··· 2003 1915 if (hdl == NULL || hdl->buckets == NULL) 2004 1916 return; 2005 1917 1918 + if (!hdl->req_obj.req && !list_empty(&hdl->requests)) { 1919 + struct v4l2_ctrl_handler *req, *next_req; 1920 + 1921 + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { 1922 + media_request_object_unbind(&req->req_obj); 1923 + media_request_object_put(&req->req_obj); 1924 + } 1925 + } 2006 1926 mutex_lock(hdl->lock); 2007 1927 /* Free all nodes */ 2008 1928 list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { ··· 2112 2016 2113 2017 /* Allocate a new v4l2_ctrl_ref and hook it into the handler. 
*/ 2114 2018 static int handler_new_ref(struct v4l2_ctrl_handler *hdl, 2115 - struct v4l2_ctrl *ctrl) 2019 + struct v4l2_ctrl *ctrl, 2020 + struct v4l2_ctrl_ref **ctrl_ref, 2021 + bool from_other_dev, bool allocate_req) 2116 2022 { 2117 2023 struct v4l2_ctrl_ref *ref; 2118 2024 struct v4l2_ctrl_ref *new_ref; 2119 2025 u32 id = ctrl->id; 2120 2026 u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1; 2121 2027 int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ 2028 + unsigned int size_extra_req = 0; 2029 + 2030 + if (ctrl_ref) 2031 + *ctrl_ref = NULL; 2122 2032 2123 2033 /* 2124 2034 * Automatically add the control class if it is not yet present and ··· 2138 2036 if (hdl->error) 2139 2037 return hdl->error; 2140 2038 2141 - new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL); 2039 + if (allocate_req) 2040 + size_extra_req = ctrl->elems * ctrl->elem_size; 2041 + new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL); 2142 2042 if (!new_ref) 2143 2043 return handler_set_err(hdl, -ENOMEM); 2144 2044 new_ref->ctrl = ctrl; 2045 + new_ref->from_other_dev = from_other_dev; 2046 + if (size_extra_req) 2047 + new_ref->p_req.p = &new_ref[1]; 2048 + 2145 2049 if (ctrl->handler == hdl) { 2146 2050 /* By default each control starts in a cluster of its own. 
2147 2051 new_ref->ctrl is basically a cluster array with one ··· 2187 2079 /* Insert the control node in the hash */ 2188 2080 new_ref->next = hdl->buckets[bucket]; 2189 2081 hdl->buckets[bucket] = new_ref; 2082 + if (ctrl_ref) 2083 + *ctrl_ref = new_ref; 2190 2084 2191 2085 unlock: 2192 2086 mutex_unlock(hdl->lock); ··· 2242 2132 break; 2243 2133 case V4L2_CTRL_TYPE_U32: 2244 2134 elem_size = sizeof(u32); 2135 + break; 2136 + case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS: 2137 + elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params); 2138 + break; 2139 + case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: 2140 + elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization); 2245 2141 break; 2246 2142 default: 2247 2143 if (type < V4L2_CTRL_COMPOUND_TYPES) ··· 2336 2220 ctrl->type_ops->init(ctrl, idx, ctrl->p_new); 2337 2221 } 2338 2222 2339 - if (handler_new_ref(hdl, ctrl)) { 2223 + if (handler_new_ref(hdl, ctrl, NULL, false, false)) { 2340 2224 kvfree(ctrl); 2341 2225 return NULL; 2342 2226 } ··· 2505 2389 /* Add the controls from another handler to our own. 
*/ 2506 2390 int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, 2507 2391 struct v4l2_ctrl_handler *add, 2508 - bool (*filter)(const struct v4l2_ctrl *ctrl)) 2392 + bool (*filter)(const struct v4l2_ctrl *ctrl), 2393 + bool from_other_dev) 2509 2394 { 2510 2395 struct v4l2_ctrl_ref *ref; 2511 2396 int ret = 0; ··· 2529 2412 /* Filter any unwanted controls */ 2530 2413 if (filter && !filter(ctrl)) 2531 2414 continue; 2532 - ret = handler_new_ref(hdl, ctrl); 2415 + ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false); 2533 2416 if (ret) 2534 2417 break; 2535 2418 } ··· 2932 2815 } 2933 2816 EXPORT_SYMBOL(v4l2_querymenu); 2934 2817 2818 + static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, 2819 + const struct v4l2_ctrl_handler *from) 2820 + { 2821 + struct v4l2_ctrl_ref *ref; 2822 + int err = 0; 2823 + 2824 + if (WARN_ON(!hdl || hdl == from)) 2825 + return -EINVAL; 2826 + 2827 + if (hdl->error) 2828 + return hdl->error; 2829 + 2830 + WARN_ON(hdl->lock != &hdl->_lock); 2831 + 2832 + mutex_lock(from->lock); 2833 + list_for_each_entry(ref, &from->ctrl_refs, node) { 2834 + struct v4l2_ctrl *ctrl = ref->ctrl; 2835 + struct v4l2_ctrl_ref *new_ref; 2836 + 2837 + /* Skip refs inherited from other devices */ 2838 + if (ref->from_other_dev) 2839 + continue; 2840 + /* And buttons */ 2841 + if (ctrl->type == V4L2_CTRL_TYPE_BUTTON) 2842 + continue; 2843 + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); 2844 + if (err) 2845 + break; 2846 + } 2847 + mutex_unlock(from->lock); 2848 + return err; 2849 + } 2850 + 2851 + static void v4l2_ctrl_request_queue(struct media_request_object *obj) 2852 + { 2853 + struct v4l2_ctrl_handler *hdl = 2854 + container_of(obj, struct v4l2_ctrl_handler, req_obj); 2855 + struct v4l2_ctrl_handler *main_hdl = obj->priv; 2856 + struct v4l2_ctrl_handler *prev_hdl = NULL; 2857 + struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL; 2858 + 2859 + if (list_empty(&main_hdl->requests_queued)) 2860 + goto queue; 2861 + 2862 + 
prev_hdl = list_last_entry(&main_hdl->requests_queued, 2863 + struct v4l2_ctrl_handler, requests_queued); 2864 + /* 2865 + * Note: prev_hdl and hdl must contain the same list of control 2866 + * references, so if any differences are detected then that is a 2867 + * driver bug and the WARN_ON is triggered. 2868 + */ 2869 + mutex_lock(prev_hdl->lock); 2870 + ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs, 2871 + struct v4l2_ctrl_ref, node); 2872 + list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) { 2873 + if (ref_ctrl->req) 2874 + continue; 2875 + while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) { 2876 + /* Should never happen, but just in case... */ 2877 + if (list_is_last(&ref_ctrl_prev->node, 2878 + &prev_hdl->ctrl_refs)) 2879 + break; 2880 + ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node); 2881 + } 2882 + if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id)) 2883 + break; 2884 + ref_ctrl->req = ref_ctrl_prev->req; 2885 + } 2886 + mutex_unlock(prev_hdl->lock); 2887 + queue: 2888 + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); 2889 + hdl->request_is_queued = true; 2890 + } 2891 + 2892 + static void v4l2_ctrl_request_unbind(struct media_request_object *obj) 2893 + { 2894 + struct v4l2_ctrl_handler *hdl = 2895 + container_of(obj, struct v4l2_ctrl_handler, req_obj); 2896 + 2897 + list_del_init(&hdl->requests); 2898 + if (hdl->request_is_queued) { 2899 + list_del_init(&hdl->requests_queued); 2900 + hdl->request_is_queued = false; 2901 + } 2902 + } 2903 + 2904 + static void v4l2_ctrl_request_release(struct media_request_object *obj) 2905 + { 2906 + struct v4l2_ctrl_handler *hdl = 2907 + container_of(obj, struct v4l2_ctrl_handler, req_obj); 2908 + 2909 + v4l2_ctrl_handler_free(hdl); 2910 + kfree(hdl); 2911 + } 2912 + 2913 + static const struct media_request_object_ops req_ops = { 2914 + .queue = v4l2_ctrl_request_queue, 2915 + .unbind = v4l2_ctrl_request_unbind, 2916 + .release = v4l2_ctrl_request_release, 2917 + }; 2918 + 
2919 + struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, 2920 + struct v4l2_ctrl_handler *parent) 2921 + { 2922 + struct media_request_object *obj; 2923 + 2924 + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING && 2925 + req->state != MEDIA_REQUEST_STATE_QUEUED)) 2926 + return NULL; 2927 + 2928 + obj = media_request_object_find(req, &req_ops, parent); 2929 + if (obj) 2930 + return container_of(obj, struct v4l2_ctrl_handler, req_obj); 2931 + return NULL; 2932 + } 2933 + EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find); 2934 + 2935 + struct v4l2_ctrl * 2936 + v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) 2937 + { 2938 + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); 2939 + 2940 + return (ref && ref->req == ref) ? ref->ctrl : NULL; 2941 + } 2942 + EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find); 2943 + 2944 + static int v4l2_ctrl_request_bind(struct media_request *req, 2945 + struct v4l2_ctrl_handler *hdl, 2946 + struct v4l2_ctrl_handler *from) 2947 + { 2948 + int ret; 2949 + 2950 + ret = v4l2_ctrl_request_clone(hdl, from); 2951 + 2952 + if (!ret) { 2953 + ret = media_request_object_bind(req, &req_ops, 2954 + from, false, &hdl->req_obj); 2955 + if (!ret) 2956 + list_add_tail(&hdl->requests, &from->requests); 2957 + } 2958 + return ret; 2959 + } 2935 2960 2936 2961 /* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: 2937 2962 ··· 3135 2876 3136 2877 if (cs->which && 3137 2878 cs->which != V4L2_CTRL_WHICH_DEF_VAL && 2879 + cs->which != V4L2_CTRL_WHICH_REQUEST_VAL && 3138 2880 V4L2_CTRL_ID2WHICH(id) != cs->which) 3139 2881 return -EINVAL; 3140 2882 ··· 3146 2886 ref = find_ref_lock(hdl, id); 3147 2887 if (ref == NULL) 3148 2888 return -EINVAL; 2889 + h->ref = ref; 3149 2890 ctrl = ref->ctrl; 3150 2891 if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) 3151 2892 return -EINVAL; ··· 3169 2908 } 3170 2909 /* Store the ref to the master control of the cluster */ 3171 2910 h->mref = ref; 
3172 - h->ctrl = ctrl; 3173 2911 /* Initially set next to 0, meaning that there is no other 3174 2912 control in this helper array belonging to the same 3175 2913 cluster */ ··· 3215 2955 whether there are any controls at all. */ 3216 2956 static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) 3217 2957 { 3218 - if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL) 2958 + if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL || 2959 + which == V4L2_CTRL_WHICH_REQUEST_VAL) 3219 2960 return 0; 3220 2961 return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; 3221 2962 } 3222 2963 3223 - 3224 - 3225 2964 /* Get extended controls. Allocates the helpers array if needed. */ 3226 - int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) 2965 + static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, 2966 + struct v4l2_ext_controls *cs) 3227 2967 { 3228 2968 struct v4l2_ctrl_helper helper[4]; 3229 2969 struct v4l2_ctrl_helper *helpers = helper; ··· 3253 2993 cs->error_idx = cs->count; 3254 2994 3255 2995 for (i = 0; !ret && i < cs->count; i++) 3256 - if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) 2996 + if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) 3257 2997 ret = -EACCES; 3258 2998 3259 2999 for (i = 0; !ret && i < cs->count; i++) { ··· 3287 3027 u32 idx = i; 3288 3028 3289 3029 do { 3290 - ret = ctrl_to_user(cs->controls + idx, 3291 - helpers[idx].ctrl); 3030 + if (helpers[idx].ref->req) 3031 + ret = req_to_user(cs->controls + idx, 3032 + helpers[idx].ref->req); 3033 + else 3034 + ret = ctrl_to_user(cs->controls + idx, 3035 + helpers[idx].ref->ctrl); 3292 3036 idx = helpers[idx].next; 3293 3037 } while (!ret && idx); 3294 3038 } ··· 3301 3037 3302 3038 if (cs->count > ARRAY_SIZE(helper)) 3303 3039 kvfree(helpers); 3040 + return ret; 3041 + } 3042 + 3043 + static struct media_request_object * 3044 + v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl, 3045 + struct media_request *req, bool set) 3046 + { 3047 + 
struct media_request_object *obj; 3048 + struct v4l2_ctrl_handler *new_hdl; 3049 + int ret; 3050 + 3051 + if (IS_ERR(req)) 3052 + return ERR_CAST(req); 3053 + 3054 + if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) 3055 + return ERR_PTR(-EBUSY); 3056 + 3057 + obj = media_request_object_find(req, &req_ops, hdl); 3058 + if (obj) 3059 + return obj; 3060 + if (!set) 3061 + return ERR_PTR(-ENOENT); 3062 + 3063 + new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL); 3064 + if (!new_hdl) 3065 + return ERR_PTR(-ENOMEM); 3066 + 3067 + obj = &new_hdl->req_obj; 3068 + ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8); 3069 + if (!ret) 3070 + ret = v4l2_ctrl_request_bind(req, new_hdl, hdl); 3071 + if (ret) { 3072 + kfree(new_hdl); 3073 + 3074 + return ERR_PTR(ret); 3075 + } 3076 + 3077 + media_request_object_get(obj); 3078 + return obj; 3079 + } 3080 + 3081 + int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev, 3082 + struct v4l2_ext_controls *cs) 3083 + { 3084 + struct media_request_object *obj = NULL; 3085 + struct media_request *req = NULL; 3086 + int ret; 3087 + 3088 + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) { 3089 + if (!mdev || cs->request_fd < 0) 3090 + return -EINVAL; 3091 + 3092 + req = media_request_get_by_fd(mdev, cs->request_fd); 3093 + if (IS_ERR(req)) 3094 + return PTR_ERR(req); 3095 + 3096 + if (req->state != MEDIA_REQUEST_STATE_COMPLETE) { 3097 + media_request_put(req); 3098 + return -EACCES; 3099 + } 3100 + 3101 + ret = media_request_lock_for_access(req); 3102 + if (ret) { 3103 + media_request_put(req); 3104 + return ret; 3105 + } 3106 + 3107 + obj = v4l2_ctrls_find_req_obj(hdl, req, false); 3108 + if (IS_ERR(obj)) { 3109 + media_request_unlock_for_access(req); 3110 + media_request_put(req); 3111 + return PTR_ERR(obj); 3112 + } 3113 + 3114 + hdl = container_of(obj, struct v4l2_ctrl_handler, 3115 + req_obj); 3116 + } 3117 + 3118 + ret = v4l2_g_ext_ctrls_common(hdl, cs); 3119 + 3120 + if (obj) { 3121 + 
media_request_unlock_for_access(req); 3122 + media_request_object_put(obj); 3123 + media_request_put(req); 3124 + } 3304 3125 return ret; 3305 3126 } 3306 3127 EXPORT_SYMBOL(v4l2_g_ext_ctrls); ··· 3529 3180 3530 3181 cs->error_idx = cs->count; 3531 3182 for (i = 0; i < cs->count; i++) { 3532 - struct v4l2_ctrl *ctrl = helpers[i].ctrl; 3183 + struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl; 3533 3184 union v4l2_ctrl_ptr p_new; 3534 3185 3535 3186 cs->error_idx = i; ··· 3576 3227 } 3577 3228 3578 3229 /* Try or try-and-set controls */ 3579 - static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, 3580 - struct v4l2_ext_controls *cs, 3581 - bool set) 3230 + static int try_set_ext_ctrls_common(struct v4l2_fh *fh, 3231 + struct v4l2_ctrl_handler *hdl, 3232 + struct v4l2_ext_controls *cs, bool set) 3582 3233 { 3583 3234 struct v4l2_ctrl_helper helper[4]; 3584 3235 struct v4l2_ctrl_helper *helpers = helper; ··· 3641 3292 do { 3642 3293 /* Check if the auto control is part of the 3643 3294 list, and remember the new value. */ 3644 - if (helpers[tmp_idx].ctrl == master) 3295 + if (helpers[tmp_idx].ref->ctrl == master) 3645 3296 new_auto_val = cs->controls[tmp_idx].value; 3646 3297 tmp_idx = helpers[tmp_idx].next; 3647 3298 } while (tmp_idx); ··· 3654 3305 /* Copy the new caller-supplied control values. 3655 3306 user_to_new() sets 'is_new' to 1. 
*/ 3656 3307 do { 3657 - struct v4l2_ctrl *ctrl = helpers[idx].ctrl; 3308 + struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl; 3658 3309 3659 3310 ret = user_to_new(cs->controls + idx, ctrl); 3660 3311 if (!ret && ctrl->is_ptr) ··· 3663 3314 } while (!ret && idx); 3664 3315 3665 3316 if (!ret) 3666 - ret = try_or_set_cluster(fh, master, set, 0); 3317 + ret = try_or_set_cluster(fh, master, 3318 + !hdl->req_obj.req && set, 0); 3319 + if (!ret && hdl->req_obj.req && set) { 3320 + for (j = 0; j < master->ncontrols; j++) { 3321 + struct v4l2_ctrl_ref *ref = 3322 + find_ref(hdl, master->cluster[j]->id); 3323 + 3324 + new_to_req(ref); 3325 + } 3326 + } 3667 3327 3668 3328 /* Copy the new values back to userspace. */ 3669 3329 if (!ret) { 3670 3330 idx = i; 3671 3331 do { 3672 3332 ret = new_to_user(cs->controls + idx, 3673 - helpers[idx].ctrl); 3333 + helpers[idx].ref->ctrl); 3674 3334 idx = helpers[idx].next; 3675 3335 } while (!ret && idx); 3676 3336 } ··· 3691 3333 return ret; 3692 3334 } 3693 3335 3694 - int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) 3336 + static int try_set_ext_ctrls(struct v4l2_fh *fh, 3337 + struct v4l2_ctrl_handler *hdl, struct media_device *mdev, 3338 + struct v4l2_ext_controls *cs, bool set) 3695 3339 { 3696 - return try_set_ext_ctrls(NULL, hdl, cs, false); 3340 + struct media_request_object *obj = NULL; 3341 + struct media_request *req = NULL; 3342 + int ret; 3343 + 3344 + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) { 3345 + if (!mdev || cs->request_fd < 0) 3346 + return -EINVAL; 3347 + 3348 + req = media_request_get_by_fd(mdev, cs->request_fd); 3349 + if (IS_ERR(req)) 3350 + return PTR_ERR(req); 3351 + 3352 + ret = media_request_lock_for_update(req); 3353 + if (ret) { 3354 + media_request_put(req); 3355 + return ret; 3356 + } 3357 + 3358 + obj = v4l2_ctrls_find_req_obj(hdl, req, set); 3359 + if (IS_ERR(obj)) { 3360 + media_request_unlock_for_update(req); 3361 + media_request_put(req); 3362 + return 
PTR_ERR(obj); 3363 + } 3364 + hdl = container_of(obj, struct v4l2_ctrl_handler, 3365 + req_obj); 3366 + } 3367 + 3368 + ret = try_set_ext_ctrls_common(fh, hdl, cs, set); 3369 + 3370 + if (obj) { 3371 + media_request_unlock_for_update(req); 3372 + media_request_object_put(obj); 3373 + media_request_put(req); 3374 + } 3375 + 3376 + return ret; 3377 + } 3378 + 3379 + int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev, 3380 + struct v4l2_ext_controls *cs) 3381 + { 3382 + return try_set_ext_ctrls(NULL, hdl, mdev, cs, false); 3697 3383 } 3698 3384 EXPORT_SYMBOL(v4l2_try_ext_ctrls); 3699 3385 3700 3386 int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, 3701 - struct v4l2_ext_controls *cs) 3387 + struct media_device *mdev, struct v4l2_ext_controls *cs) 3702 3388 { 3703 - return try_set_ext_ctrls(fh, hdl, cs, true); 3389 + return try_set_ext_ctrls(fh, hdl, mdev, cs, true); 3704 3390 } 3705 3391 EXPORT_SYMBOL(v4l2_s_ext_ctrls); 3706 3392 ··· 3842 3440 return set_ctrl(NULL, ctrl, 0); 3843 3441 } 3844 3442 EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); 3443 + 3444 + void v4l2_ctrl_request_complete(struct media_request *req, 3445 + struct v4l2_ctrl_handler *main_hdl) 3446 + { 3447 + struct media_request_object *obj; 3448 + struct v4l2_ctrl_handler *hdl; 3449 + struct v4l2_ctrl_ref *ref; 3450 + 3451 + if (!req || !main_hdl) 3452 + return; 3453 + 3454 + /* 3455 + * Note that it is valid if nothing was found. It means 3456 + * that this request doesn't have any controls and so just 3457 + * wants to leave the controls unchanged. 
3458 + */ 3459 + obj = media_request_object_find(req, &req_ops, main_hdl); 3460 + if (!obj) 3461 + return; 3462 + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); 3463 + 3464 + list_for_each_entry(ref, &hdl->ctrl_refs, node) { 3465 + struct v4l2_ctrl *ctrl = ref->ctrl; 3466 + struct v4l2_ctrl *master = ctrl->cluster[0]; 3467 + unsigned int i; 3468 + 3469 + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { 3470 + ref->req = ref; 3471 + 3472 + v4l2_ctrl_lock(master); 3473 + /* g_volatile_ctrl will update the current control values */ 3474 + for (i = 0; i < master->ncontrols; i++) 3475 + cur_to_new(master->cluster[i]); 3476 + call_op(master, g_volatile_ctrl); 3477 + new_to_req(ref); 3478 + v4l2_ctrl_unlock(master); 3479 + continue; 3480 + } 3481 + if (ref->req == ref) 3482 + continue; 3483 + 3484 + v4l2_ctrl_lock(ctrl); 3485 + if (ref->req) 3486 + ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req); 3487 + else 3488 + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req); 3489 + v4l2_ctrl_unlock(ctrl); 3490 + } 3491 + 3492 + WARN_ON(!hdl->request_is_queued); 3493 + list_del_init(&hdl->requests_queued); 3494 + hdl->request_is_queued = false; 3495 + media_request_object_complete(obj); 3496 + media_request_object_put(obj); 3497 + } 3498 + EXPORT_SYMBOL(v4l2_ctrl_request_complete); 3499 + 3500 + void v4l2_ctrl_request_setup(struct media_request *req, 3501 + struct v4l2_ctrl_handler *main_hdl) 3502 + { 3503 + struct media_request_object *obj; 3504 + struct v4l2_ctrl_handler *hdl; 3505 + struct v4l2_ctrl_ref *ref; 3506 + 3507 + if (!req || !main_hdl) 3508 + return; 3509 + 3510 + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) 3511 + return; 3512 + 3513 + /* 3514 + * Note that it is valid if nothing was found. It means 3515 + * that this request doesn't have any controls and so just 3516 + * wants to leave the controls unchanged. 
3517 + */ 3518 + obj = media_request_object_find(req, &req_ops, main_hdl); 3519 + if (!obj) 3520 + return; 3521 + if (obj->completed) { 3522 + media_request_object_put(obj); 3523 + return; 3524 + } 3525 + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); 3526 + 3527 + list_for_each_entry(ref, &hdl->ctrl_refs, node) 3528 + ref->req_done = false; 3529 + 3530 + list_for_each_entry(ref, &hdl->ctrl_refs, node) { 3531 + struct v4l2_ctrl *ctrl = ref->ctrl; 3532 + struct v4l2_ctrl *master = ctrl->cluster[0]; 3533 + bool have_new_data = false; 3534 + int i; 3535 + 3536 + /* 3537 + * Skip if this control was already handled by a cluster. 3538 + * Skip button controls and read-only controls. 3539 + */ 3540 + if (ref->req_done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || 3541 + (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) 3542 + continue; 3543 + 3544 + v4l2_ctrl_lock(master); 3545 + for (i = 0; i < master->ncontrols; i++) { 3546 + if (master->cluster[i]) { 3547 + struct v4l2_ctrl_ref *r = 3548 + find_ref(hdl, master->cluster[i]->id); 3549 + 3550 + if (r->req && r == r->req) { 3551 + have_new_data = true; 3552 + break; 3553 + } 3554 + } 3555 + } 3556 + if (!have_new_data) { 3557 + v4l2_ctrl_unlock(master); 3558 + continue; 3559 + } 3560 + 3561 + for (i = 0; i < master->ncontrols; i++) { 3562 + if (master->cluster[i]) { 3563 + struct v4l2_ctrl_ref *r = 3564 + find_ref(hdl, master->cluster[i]->id); 3565 + 3566 + req_to_new(r); 3567 + master->cluster[i]->is_new = 1; 3568 + r->req_done = true; 3569 + } 3570 + } 3571 + /* 3572 + * For volatile autoclusters that are currently in auto mode 3573 + * we need to discover if it will be set to manual mode. 3574 + * If so, then we have to copy the current volatile values 3575 + * first since those will become the new manual values (which 3576 + * may be overwritten by explicit new values from this set 3577 + * of controls). 
3578 + */ 3579 + if (master->is_auto && master->has_volatiles && 3580 + !is_cur_manual(master)) { 3581 + s32 new_auto_val = *master->p_new.p_s32; 3582 + 3583 + /* 3584 + * If the new value == the manual value, then copy 3585 + * the current volatile values. 3586 + */ 3587 + if (new_auto_val == master->manual_mode_value) 3588 + update_from_auto_cluster(master); 3589 + } 3590 + 3591 + try_or_set_cluster(NULL, master, true, 0); 3592 + 3593 + v4l2_ctrl_unlock(master); 3594 + } 3595 + 3596 + media_request_object_put(obj); 3597 + } 3598 + EXPORT_SYMBOL(v4l2_ctrl_request_setup); 3845 3599 3846 3600 void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv) 3847 3601 {
+16 -2
drivers/media/v4l2-core/v4l2-dev.c
··· 444 444 struct video_device *vdev = video_devdata(filp); 445 445 int ret = 0; 446 446 447 - if (vdev->fops->release) 448 - ret = vdev->fops->release(filp); 447 + /* 448 + * We need to serialize the release() with queueing new requests. 449 + * The release() may trigger the cancellation of a streaming 450 + * operation, and that should not be mixed with queueing a new 451 + * request at the same time. 452 + */ 453 + if (vdev->fops->release) { 454 + if (v4l2_device_supports_requests(vdev->v4l2_dev)) { 455 + mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex); 456 + ret = vdev->fops->release(filp); 457 + mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex); 458 + } else { 459 + ret = vdev->fops->release(filp); 460 + } 461 + } 462 + 449 463 if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) 450 464 dprintk("%s: release\n", 451 465 video_device_node_name(vdev));
+2 -1
drivers/media/v4l2-core/v4l2-device.c
··· 178 178 179 179 sd->v4l2_dev = v4l2_dev; 180 180 /* This just returns 0 if either of the two args is NULL */ 181 - err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL); 181 + err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, 182 + NULL, true); 182 183 if (err) 183 184 goto error_module; 184 185
+36 -14
drivers/media/v4l2-core/v4l2-ioctl.c
··· 474 474 const struct v4l2_plane *plane; 475 475 int i; 476 476 477 - pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s", 477 + pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s", 478 478 p->timestamp.tv_sec / 3600, 479 479 (int)(p->timestamp.tv_sec / 60) % 60, 480 480 (int)(p->timestamp.tv_sec % 60), 481 481 (long)p->timestamp.tv_usec, 482 482 p->index, 483 - prt_names(p->type, v4l2_type_names), 483 + prt_names(p->type, v4l2_type_names), p->request_fd, 484 484 p->flags, prt_names(p->field, v4l2_field_names), 485 485 p->sequence, prt_names(p->memory, v4l2_memory_names)); 486 486 ··· 590 590 const struct v4l2_ext_controls *p = arg; 591 591 int i; 592 592 593 - pr_cont("which=0x%x, count=%d, error_idx=%d", 594 - p->which, p->count, p->error_idx); 593 + pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d", 594 + p->which, p->count, p->error_idx, p->request_fd); 595 595 for (i = 0; i < p->count; i++) { 596 596 if (!p->controls[i].size) 597 597 pr_cont(", id/val=0x%x/0x%x", ··· 907 907 __u32 i; 908 908 909 909 /* zero the reserved fields */ 910 - c->reserved[0] = c->reserved[1] = 0; 910 + c->reserved[0] = 0; 911 911 for (i = 0; i < c->count; i++) 912 912 c->controls[i].reserved2[0] = 0; 913 913 ··· 1309 1309 case V4L2_PIX_FMT_H263: descr = "H.263"; break; 1310 1310 case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break; 1311 1311 case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break; 1312 + case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break; 1312 1313 case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 part 2 ES"; break; 1313 1314 case V4L2_PIX_FMT_XVID: descr = "Xvid"; break; 1314 1315 case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break; ··· 1337 1336 case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break; 1338 1337 case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break; 1339 1338 case 
V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break; 1339 + case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break; 1340 1340 default: 1341 1341 WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat); 1342 1342 if (fmt->description[0]) ··· 1879 1877 if (ret) 1880 1878 return ret; 1881 1879 1882 - CLEAR_AFTER_FIELD(p, memory); 1880 + CLEAR_AFTER_FIELD(p, capabilities); 1883 1881 1884 1882 return ops->vidioc_reqbufs(file, fh, p); 1885 1883 } ··· 1920 1918 if (ret) 1921 1919 return ret; 1922 1920 1923 - CLEAR_AFTER_FIELD(create, format); 1921 + CLEAR_AFTER_FIELD(create, capabilities); 1924 1922 1925 1923 v4l_sanitize_format(&create->format); 1926 1924 ··· 2111 2109 2112 2110 p->error_idx = p->count; 2113 2111 if (vfh && vfh->ctrl_handler) 2114 - return v4l2_g_ext_ctrls(vfh->ctrl_handler, p); 2112 + return v4l2_g_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p); 2115 2113 if (vfd->ctrl_handler) 2116 - return v4l2_g_ext_ctrls(vfd->ctrl_handler, p); 2114 + return v4l2_g_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p); 2117 2115 if (ops->vidioc_g_ext_ctrls == NULL) 2118 2116 return -ENOTTY; 2119 2117 return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) : ··· 2130 2128 2131 2129 p->error_idx = p->count; 2132 2130 if (vfh && vfh->ctrl_handler) 2133 - return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p); 2131 + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vfd->v4l2_dev->mdev, p); 2134 2132 if (vfd->ctrl_handler) 2135 - return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p); 2133 + return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, vfd->v4l2_dev->mdev, p); 2136 2134 if (ops->vidioc_s_ext_ctrls == NULL) 2137 2135 return -ENOTTY; 2138 2136 return check_ext_ctrls(p, 0) ? 
ops->vidioc_s_ext_ctrls(file, fh, p) : ··· 2149 2147 2150 2148 p->error_idx = p->count; 2151 2149 if (vfh && vfh->ctrl_handler) 2152 - return v4l2_try_ext_ctrls(vfh->ctrl_handler, p); 2150 + return v4l2_try_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p); 2153 2151 if (vfd->ctrl_handler) 2154 - return v4l2_try_ext_ctrls(vfd->ctrl_handler, p); 2152 + return v4l2_try_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p); 2155 2153 if (ops->vidioc_try_ext_ctrls == NULL) 2156 2154 return -ENOTTY; 2157 2155 return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) : ··· 2782 2780 unsigned int cmd, void *arg) 2783 2781 { 2784 2782 struct video_device *vfd = video_devdata(file); 2783 + struct mutex *req_queue_lock = NULL; 2785 2784 struct mutex *lock; /* ioctl serialization mutex */ 2786 2785 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; 2787 2786 bool write_only = false; ··· 2802 2799 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) 2803 2800 vfh = file->private_data; 2804 2801 2802 + /* 2803 + * We need to serialize streamon/off with queueing new requests. 2804 + * These ioctls may trigger the cancellation of a streaming 2805 + * operation, and that should not be mixed with queueing a new 2806 + * request at the same time. 
2807 + */ 2808 + if (v4l2_device_supports_requests(vfd->v4l2_dev) && 2809 + (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) { 2810 + req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex; 2811 + 2812 + if (mutex_lock_interruptible(req_queue_lock)) 2813 + return -ERESTARTSYS; 2814 + } 2815 + 2805 2816 lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg); 2806 2817 2807 - if (lock && mutex_lock_interruptible(lock)) 2818 + if (lock && mutex_lock_interruptible(lock)) { 2819 + if (req_queue_lock) 2820 + mutex_unlock(req_queue_lock); 2808 2821 return -ERESTARTSYS; 2822 + } 2809 2823 2810 2824 if (!video_is_registered(vfd)) { 2811 2825 ret = -ENODEV; ··· 2881 2861 unlock: 2882 2862 if (lock) 2883 2863 mutex_unlock(lock); 2864 + if (req_queue_lock) 2865 + mutex_unlock(req_queue_lock); 2884 2866 return ret; 2885 2867 } 2886 2868
+58 -9
drivers/media/v4l2-core/v4l2-mem2mem.c
··· 387 387 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); 388 388 if (m2m_dev->m2m_ops->job_abort) 389 389 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); 390 - dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); 390 + dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); 391 391 wait_event(m2m_ctx->finished, 392 392 !(m2m_ctx->job_flags & TRANS_RUNNING)); 393 393 } else if (m2m_ctx->job_flags & TRANS_QUEUED) { ··· 473 473 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 474 474 struct v4l2_buffer *buf) 475 475 { 476 + struct video_device *vdev = video_devdata(file); 476 477 struct vb2_queue *vq; 477 478 int ret; 478 479 479 480 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); 480 - ret = vb2_qbuf(vq, buf); 481 - if (!ret) 481 + if (!V4L2_TYPE_IS_OUTPUT(vq->type) && 482 + (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { 483 + dprintk("%s: requests cannot be used with capture buffers\n", 484 + __func__); 485 + return -EPERM; 486 + } 487 + ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); 488 + if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) 482 489 v4l2_m2m_try_schedule(m2m_ctx); 483 490 484 491 return ret; ··· 505 498 int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 506 499 struct v4l2_buffer *buf) 507 500 { 501 + struct video_device *vdev = video_devdata(file); 508 502 struct vb2_queue *vq; 509 - int ret; 510 503 511 504 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); 512 - ret = vb2_prepare_buf(vq, buf); 513 - if (!ret) 514 - v4l2_m2m_try_schedule(m2m_ctx); 515 - 516 - return ret; 505 + return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); 517 506 } 518 507 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); 519 508 ··· 952 949 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); 953 950 } 954 951 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); 952 + 953 + void vb2_m2m_request_queue(struct media_request *req) 954 + { 955 + struct media_request_object *obj, *obj_safe; 956 + struct v4l2_m2m_ctx *m2m_ctx = NULL; 957 + 958 + /* 959 
+ * Queue all objects. Note that buffer objects are at the end of the 960 + * objects list, after all other object types. Once buffer objects 961 + * are queued, the driver might delete them immediately (if the driver 962 + * processes the buffer at once), so we have to use 963 + * list_for_each_entry_safe() to handle the case where the object we 964 + * queue is deleted. 965 + */ 966 + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { 967 + struct v4l2_m2m_ctx *m2m_ctx_obj; 968 + struct vb2_buffer *vb; 969 + 970 + if (!obj->ops->queue) 971 + continue; 972 + 973 + if (vb2_request_object_is_buffer(obj)) { 974 + /* Sanity checks */ 975 + vb = container_of(obj, struct vb2_buffer, req_obj); 976 + WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); 977 + m2m_ctx_obj = container_of(vb->vb2_queue, 978 + struct v4l2_m2m_ctx, 979 + out_q_ctx.q); 980 + WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); 981 + m2m_ctx = m2m_ctx_obj; 982 + } 983 + 984 + /* 985 + * The buffer we queue here can in theory be immediately 986 + * unbound, hence the use of list_for_each_entry_safe() 987 + * above and why we call the queue op last. 988 + */ 989 + obj->ops->queue(obj); 990 + } 991 + 992 + WARN_ON(!m2m_ctx); 993 + 994 + if (m2m_ctx) 995 + v4l2_m2m_try_schedule(m2m_ctx); 996 + } 997 + EXPORT_SYMBOL_GPL(vb2_m2m_request_queue); 955 998 956 999 /* Videobuf2 ioctl helpers */ 957 1000
+6 -3
drivers/media/v4l2-core/v4l2-subdev.c
··· 222 222 case VIDIOC_G_EXT_CTRLS: 223 223 if (!vfh->ctrl_handler) 224 224 return -ENOTTY; 225 - return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg); 225 + return v4l2_g_ext_ctrls(vfh->ctrl_handler, 226 + sd->v4l2_dev->mdev, arg); 226 227 227 228 case VIDIOC_S_EXT_CTRLS: 228 229 if (!vfh->ctrl_handler) 229 230 return -ENOTTY; 230 - return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg); 231 + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, 232 + sd->v4l2_dev->mdev, arg); 231 233 232 234 case VIDIOC_TRY_EXT_CTRLS: 233 235 if (!vfh->ctrl_handler) 234 236 return -ENOTTY; 235 - return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg); 237 + return v4l2_try_ext_ctrls(vfh->ctrl_handler, 238 + sd->v4l2_dev->mdev, arg); 236 239 237 240 case VIDIOC_DQEVENT: 238 241 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+2
drivers/staging/media/Kconfig
··· 31 31 32 32 source "drivers/staging/media/omap4iss/Kconfig" 33 33 34 + source "drivers/staging/media/sunxi/Kconfig" 35 + 34 36 source "drivers/staging/media/tegra-vde/Kconfig" 35 37 36 38 source "drivers/staging/media/zoran/Kconfig"
+1
drivers/staging/media/Makefile
··· 5 5 obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031/ 6 6 obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/ 7 7 obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/ 8 + obj-$(CONFIG_VIDEO_SUNXI) += sunxi/ 8 9 obj-$(CONFIG_TEGRA_VDE) += tegra-vde/ 9 10 obj-$(CONFIG_VIDEO_ZORAN) += zoran/
+2 -5
drivers/staging/media/davinci_vpfe/vpfe_video.c
··· 1135 1135 1136 1136 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n"); 1137 1137 1138 - if (vb->state != VB2_BUF_STATE_ACTIVE && 1139 - vb->state != VB2_BUF_STATE_PREPARED) 1140 - return 0; 1141 - 1142 1138 /* Initialize buffer */ 1143 1139 vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage); 1144 1140 if (vb2_plane_vaddr(vb, 0) && ··· 1425 1429 return -EACCES; 1426 1430 } 1427 1431 1428 - return vb2_qbuf(&video->buffer_queue, p); 1432 + return vb2_qbuf(&video->buffer_queue, 1433 + video->video_dev.v4l2_dev->mdev, p); 1429 1434 } 1430 1435 1431 1436 /*
+1 -1
drivers/staging/media/imx/imx-media-dev.c
··· 350 350 351 351 ret = v4l2_ctrl_add_handler(vfd->ctrl_handler, 352 352 sd->ctrl_handler, 353 - NULL); 353 + NULL, true); 354 354 if (ret) 355 355 return ret; 356 356 }
+1 -1
drivers/staging/media/imx/imx-media-fim.c
··· 463 463 { 464 464 /* add the FIM controls to the calling subdev ctrl handler */ 465 465 return v4l2_ctrl_add_handler(fim->sd->ctrl_handler, 466 - &fim->ctrl_handler, NULL); 466 + &fim->ctrl_handler, NULL, false); 467 467 } 468 468 EXPORT_SYMBOL_GPL(imx_media_fim_add_controls); 469 469
+2 -1
drivers/staging/media/omap4iss/iss_video.c
··· 802 802 static int 803 803 iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) 804 804 { 805 + struct iss_video *video = video_drvdata(file); 805 806 struct iss_video_fh *vfh = to_iss_video_fh(fh); 806 807 807 - return vb2_qbuf(&vfh->queue, b); 808 + return vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b); 808 809 } 809 810 810 811 static int
+15
drivers/staging/media/sunxi/Kconfig
··· 1 + config VIDEO_SUNXI 2 + bool "Allwinner sunXi family Video Devices" 3 + depends on ARCH_SUNXI || COMPILE_TEST 4 + help 5 + If you have an Allwinner SoC based on the sunXi family, say Y. 6 + 7 + Note that this option doesn't include new drivers in the 8 + kernel: saying N will just cause Kconfig to skip all the 9 + questions about Allwinner media devices. 10 + 11 + if VIDEO_SUNXI 12 + 13 + source "drivers/staging/media/sunxi/cedrus/Kconfig" 14 + 15 + endif
+1
drivers/staging/media/sunxi/Makefile
··· 1 + obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += cedrus/
+14
drivers/staging/media/sunxi/cedrus/Kconfig
··· 1 + config VIDEO_SUNXI_CEDRUS 2 + tristate "Allwinner Cedrus VPU driver" 3 + depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER 4 + depends on HAS_DMA 5 + depends on OF 6 + select SUNXI_SRAM 7 + select VIDEOBUF2_DMA_CONTIG 8 + select V4L2_MEM2MEM_DEV 9 + help 10 + Support for the VPU found in Allwinner SoCs, also known as the Cedar 11 + video engine. 12 + 13 + To compile this driver as a module, choose M here: the module 14 + will be called sunxi-cedrus.
+3
drivers/staging/media/sunxi/cedrus/Makefile
··· 1 + obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o 2 + 3 + sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o cedrus_mpeg2.o
+7
drivers/staging/media/sunxi/cedrus/TODO
··· 1 + Before this stateless decoder driver can leave the staging area: 2 + * The Request API needs to be stabilized; 3 + * The codec-specific controls need to be thoroughly reviewed to ensure they 4 + cover all intended uses cases; 5 + * Userspace support for the Request API needs to be reviewed; 6 + * Another stateless decoder driver should be submitted; 7 + * At least one stateless encoder driver should be submitted.
+431
drivers/staging/media/sunxi/cedrus/cedrus.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Cedrus VPU driver 4 + * 5 + * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com> 6 + * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com> 7 + * Copyright (C) 2018 Bootlin 8 + * 9 + * Based on the vim2m driver, that is: 10 + * 11 + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 12 + * Pawel Osciak, <pawel@osciak.com> 13 + * Marek Szyprowski, <m.szyprowski@samsung.com> 14 + */ 15 + 16 + #include <linux/platform_device.h> 17 + #include <linux/module.h> 18 + #include <linux/of.h> 19 + 20 + #include <media/v4l2-device.h> 21 + #include <media/v4l2-ioctl.h> 22 + #include <media/v4l2-ctrls.h> 23 + #include <media/v4l2-mem2mem.h> 24 + 25 + #include "cedrus.h" 26 + #include "cedrus_video.h" 27 + #include "cedrus_dec.h" 28 + #include "cedrus_hw.h" 29 + 30 + static const struct cedrus_control cedrus_controls[] = { 31 + { 32 + .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS, 33 + .elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params), 34 + .codec = CEDRUS_CODEC_MPEG2, 35 + .required = true, 36 + }, 37 + { 38 + .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION, 39 + .elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization), 40 + .codec = CEDRUS_CODEC_MPEG2, 41 + .required = false, 42 + }, 43 + }; 44 + 45 + #define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls) 46 + 47 + void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id) 48 + { 49 + unsigned int i; 50 + 51 + for (i = 0; ctx->ctrls[i]; i++) 52 + if (ctx->ctrls[i]->id == id) 53 + return ctx->ctrls[i]->p_cur.p; 54 + 55 + return NULL; 56 + } 57 + 58 + static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx) 59 + { 60 + struct v4l2_ctrl_handler *hdl = &ctx->hdl; 61 + struct v4l2_ctrl *ctrl; 62 + unsigned int ctrl_size; 63 + unsigned int i; 64 + 65 + v4l2_ctrl_handler_init(hdl, CEDRUS_CONTROLS_COUNT); 66 + if (hdl->error) { 67 + v4l2_err(&dev->v4l2_dev, 68 + "Failed to initialize control handler\n"); 69 + 
return hdl->error; 70 + } 71 + 72 + ctrl_size = sizeof(ctrl) * CEDRUS_CONTROLS_COUNT + 1; 73 + 74 + ctx->ctrls = kzalloc(ctrl_size, GFP_KERNEL); 75 + memset(ctx->ctrls, 0, ctrl_size); 76 + 77 + for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) { 78 + struct v4l2_ctrl_config cfg = { 0 }; 79 + 80 + cfg.elem_size = cedrus_controls[i].elem_size; 81 + cfg.id = cedrus_controls[i].id; 82 + 83 + ctrl = v4l2_ctrl_new_custom(hdl, &cfg, NULL); 84 + if (hdl->error) { 85 + v4l2_err(&dev->v4l2_dev, 86 + "Failed to create new custom control\n"); 87 + 88 + v4l2_ctrl_handler_free(hdl); 89 + kfree(ctx->ctrls); 90 + return hdl->error; 91 + } 92 + 93 + ctx->ctrls[i] = ctrl; 94 + } 95 + 96 + ctx->fh.ctrl_handler = hdl; 97 + v4l2_ctrl_handler_setup(hdl); 98 + 99 + return 0; 100 + } 101 + 102 + static int cedrus_request_validate(struct media_request *req) 103 + { 104 + struct media_request_object *obj; 105 + struct v4l2_ctrl_handler *parent_hdl, *hdl; 106 + struct cedrus_ctx *ctx = NULL; 107 + struct v4l2_ctrl *ctrl_test; 108 + unsigned int count; 109 + unsigned int i; 110 + 111 + count = vb2_request_buffer_cnt(req); 112 + if (!count) { 113 + v4l2_info(&ctx->dev->v4l2_dev, 114 + "No buffer was provided with the request\n"); 115 + return -ENOENT; 116 + } else if (count > 1) { 117 + v4l2_info(&ctx->dev->v4l2_dev, 118 + "More than one buffer was provided with the request\n"); 119 + return -EINVAL; 120 + } 121 + 122 + list_for_each_entry(obj, &req->objects, list) { 123 + struct vb2_buffer *vb; 124 + 125 + if (vb2_request_object_is_buffer(obj)) { 126 + vb = container_of(obj, struct vb2_buffer, req_obj); 127 + ctx = vb2_get_drv_priv(vb->vb2_queue); 128 + 129 + break; 130 + } 131 + } 132 + 133 + if (!ctx) 134 + return -ENOENT; 135 + 136 + parent_hdl = &ctx->hdl; 137 + 138 + hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl); 139 + if (!hdl) { 140 + v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control(s)\n"); 141 + return -ENOENT; 142 + } 143 + 144 + for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) { 145 + 
if (cedrus_controls[i].codec != ctx->current_codec || 146 + !cedrus_controls[i].required) 147 + continue; 148 + 149 + ctrl_test = v4l2_ctrl_request_hdl_ctrl_find(hdl, 150 + cedrus_controls[i].id); 151 + if (!ctrl_test) { 152 + v4l2_info(&ctx->dev->v4l2_dev, 153 + "Missing required codec control\n"); 154 + return -ENOENT; 155 + } 156 + } 157 + 158 + v4l2_ctrl_request_hdl_put(hdl); 159 + 160 + return vb2_request_validate(req); 161 + } 162 + 163 + static int cedrus_open(struct file *file) 164 + { 165 + struct cedrus_dev *dev = video_drvdata(file); 166 + struct cedrus_ctx *ctx = NULL; 167 + int ret; 168 + 169 + if (mutex_lock_interruptible(&dev->dev_mutex)) 170 + return -ERESTARTSYS; 171 + 172 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 173 + if (!ctx) { 174 + mutex_unlock(&dev->dev_mutex); 175 + return -ENOMEM; 176 + } 177 + 178 + v4l2_fh_init(&ctx->fh, video_devdata(file)); 179 + file->private_data = &ctx->fh; 180 + ctx->dev = dev; 181 + 182 + ret = cedrus_init_ctrls(dev, ctx); 183 + if (ret) 184 + goto err_free; 185 + 186 + ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, 187 + &cedrus_queue_init); 188 + if (IS_ERR(ctx->fh.m2m_ctx)) { 189 + ret = PTR_ERR(ctx->fh.m2m_ctx); 190 + goto err_ctrls; 191 + } 192 + 193 + v4l2_fh_add(&ctx->fh); 194 + 195 + mutex_unlock(&dev->dev_mutex); 196 + 197 + return 0; 198 + 199 + err_ctrls: 200 + v4l2_ctrl_handler_free(&ctx->hdl); 201 + err_free: 202 + kfree(ctx); 203 + mutex_unlock(&dev->dev_mutex); 204 + 205 + return ret; 206 + } 207 + 208 + static int cedrus_release(struct file *file) 209 + { 210 + struct cedrus_dev *dev = video_drvdata(file); 211 + struct cedrus_ctx *ctx = container_of(file->private_data, 212 + struct cedrus_ctx, fh); 213 + 214 + mutex_lock(&dev->dev_mutex); 215 + 216 + v4l2_fh_del(&ctx->fh); 217 + v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); 218 + 219 + v4l2_ctrl_handler_free(&ctx->hdl); 220 + kfree(ctx->ctrls); 221 + 222 + v4l2_fh_exit(&ctx->fh); 223 + 224 + kfree(ctx); 225 + 226 + mutex_unlock(&dev->dev_mutex); 
227 + 228 + return 0; 229 + } 230 + 231 + static const struct v4l2_file_operations cedrus_fops = { 232 + .owner = THIS_MODULE, 233 + .open = cedrus_open, 234 + .release = cedrus_release, 235 + .poll = v4l2_m2m_fop_poll, 236 + .unlocked_ioctl = video_ioctl2, 237 + .mmap = v4l2_m2m_fop_mmap, 238 + }; 239 + 240 + static const struct video_device cedrus_video_device = { 241 + .name = CEDRUS_NAME, 242 + .vfl_dir = VFL_DIR_M2M, 243 + .fops = &cedrus_fops, 244 + .ioctl_ops = &cedrus_ioctl_ops, 245 + .minor = -1, 246 + .release = video_device_release_empty, 247 + .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING, 248 + }; 249 + 250 + static const struct v4l2_m2m_ops cedrus_m2m_ops = { 251 + .device_run = cedrus_device_run, 252 + }; 253 + 254 + static const struct media_device_ops cedrus_m2m_media_ops = { 255 + .req_validate = cedrus_request_validate, 256 + .req_queue = vb2_m2m_request_queue, 257 + }; 258 + 259 + static int cedrus_probe(struct platform_device *pdev) 260 + { 261 + struct cedrus_dev *dev; 262 + struct video_device *vfd; 263 + int ret; 264 + 265 + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); 266 + if (!dev) 267 + return -ENOMEM; 268 + 269 + dev->vfd = cedrus_video_device; 270 + dev->dev = &pdev->dev; 271 + dev->pdev = pdev; 272 + 273 + ret = cedrus_hw_probe(dev); 274 + if (ret) { 275 + dev_err(&pdev->dev, "Failed to probe hardware\n"); 276 + return ret; 277 + } 278 + 279 + dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2; 280 + 281 + mutex_init(&dev->dev_mutex); 282 + spin_lock_init(&dev->irq_lock); 283 + 284 + ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); 285 + if (ret) { 286 + dev_err(&pdev->dev, "Failed to register V4L2 device\n"); 287 + return ret; 288 + } 289 + 290 + vfd = &dev->vfd; 291 + vfd->lock = &dev->dev_mutex; 292 + vfd->v4l2_dev = &dev->v4l2_dev; 293 + 294 + snprintf(vfd->name, sizeof(vfd->name), "%s", cedrus_video_device.name); 295 + video_set_drvdata(vfd, dev); 296 + 297 + dev->m2m_dev = 
v4l2_m2m_init(&cedrus_m2m_ops); 298 + if (IS_ERR(dev->m2m_dev)) { 299 + v4l2_err(&dev->v4l2_dev, 300 + "Failed to initialize V4L2 M2M device\n"); 301 + ret = PTR_ERR(dev->m2m_dev); 302 + 303 + goto err_video; 304 + } 305 + 306 + dev->mdev.dev = &pdev->dev; 307 + strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model)); 308 + 309 + media_device_init(&dev->mdev); 310 + dev->mdev.ops = &cedrus_m2m_media_ops; 311 + dev->v4l2_dev.mdev = &dev->mdev; 312 + 313 + ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, 314 + MEDIA_ENT_F_PROC_VIDEO_DECODER); 315 + if (ret) { 316 + v4l2_err(&dev->v4l2_dev, 317 + "Failed to initialize V4L2 M2M media controller\n"); 318 + goto err_m2m; 319 + } 320 + 321 + ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); 322 + if (ret) { 323 + v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); 324 + goto err_v4l2; 325 + } 326 + 327 + v4l2_info(&dev->v4l2_dev, 328 + "Device registered as /dev/video%d\n", vfd->num); 329 + 330 + ret = media_device_register(&dev->mdev); 331 + if (ret) { 332 + v4l2_err(&dev->v4l2_dev, "Failed to register media device\n"); 333 + goto err_m2m_mc; 334 + } 335 + 336 + platform_set_drvdata(pdev, dev); 337 + 338 + return 0; 339 + 340 + err_m2m_mc: 341 + v4l2_m2m_unregister_media_controller(dev->m2m_dev); 342 + err_m2m: 343 + v4l2_m2m_release(dev->m2m_dev); 344 + err_video: 345 + video_unregister_device(&dev->vfd); 346 + err_v4l2: 347 + v4l2_device_unregister(&dev->v4l2_dev); 348 + 349 + return ret; 350 + } 351 + 352 + static int cedrus_remove(struct platform_device *pdev) 353 + { 354 + struct cedrus_dev *dev = platform_get_drvdata(pdev); 355 + 356 + if (media_devnode_is_registered(dev->mdev.devnode)) { 357 + media_device_unregister(&dev->mdev); 358 + v4l2_m2m_unregister_media_controller(dev->m2m_dev); 359 + media_device_cleanup(&dev->mdev); 360 + } 361 + 362 + v4l2_m2m_release(dev->m2m_dev); 363 + video_unregister_device(&dev->vfd); 364 + v4l2_device_unregister(&dev->v4l2_dev); 365 + 366 + 
cedrus_hw_remove(dev); 367 + 368 + return 0; 369 + } 370 + 371 + static const struct cedrus_variant sun4i_a10_cedrus_variant = { 372 + /* No particular capability. */ 373 + }; 374 + 375 + static const struct cedrus_variant sun5i_a13_cedrus_variant = { 376 + /* No particular capability. */ 377 + }; 378 + 379 + static const struct cedrus_variant sun7i_a20_cedrus_variant = { 380 + /* No particular capability. */ 381 + }; 382 + 383 + static const struct cedrus_variant sun8i_a33_cedrus_variant = { 384 + .capabilities = CEDRUS_CAPABILITY_UNTILED, 385 + }; 386 + 387 + static const struct cedrus_variant sun8i_h3_cedrus_variant = { 388 + .capabilities = CEDRUS_CAPABILITY_UNTILED, 389 + }; 390 + 391 + static const struct of_device_id cedrus_dt_match[] = { 392 + { 393 + .compatible = "allwinner,sun4i-a10-video-engine", 394 + .data = &sun4i_a10_cedrus_variant, 395 + }, 396 + { 397 + .compatible = "allwinner,sun5i-a13-video-engine", 398 + .data = &sun5i_a13_cedrus_variant, 399 + }, 400 + { 401 + .compatible = "allwinner,sun7i-a20-video-engine", 402 + .data = &sun7i_a20_cedrus_variant, 403 + }, 404 + { 405 + .compatible = "allwinner,sun8i-a33-video-engine", 406 + .data = &sun8i_a33_cedrus_variant, 407 + }, 408 + { 409 + .compatible = "allwinner,sun8i-h3-video-engine", 410 + .data = &sun8i_h3_cedrus_variant, 411 + }, 412 + { /* sentinel */ } 413 + }; 414 + MODULE_DEVICE_TABLE(of, cedrus_dt_match); 415 + 416 + static struct platform_driver cedrus_driver = { 417 + .probe = cedrus_probe, 418 + .remove = cedrus_remove, 419 + .driver = { 420 + .name = CEDRUS_NAME, 421 + .owner = THIS_MODULE, 422 + .of_match_table = of_match_ptr(cedrus_dt_match), 423 + }, 424 + }; 425 + module_platform_driver(cedrus_driver); 426 + 427 + MODULE_LICENSE("GPL v2"); 428 + MODULE_AUTHOR("Florent Revest <florent.revest@free-electrons.com>"); 429 + MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>"); 430 + MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>"); 431 + 
MODULE_DESCRIPTION("Cedrus VPU driver");
+167
drivers/staging/media/sunxi/cedrus/cedrus.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Cedrus VPU driver 4 + * 5 + * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com> 6 + * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com> 7 + * Copyright (C) 2018 Bootlin 8 + * 9 + * Based on the vim2m driver, that is: 10 + * 11 + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 12 + * Pawel Osciak, <pawel@osciak.com> 13 + * Marek Szyprowski, <m.szyprowski@samsung.com> 14 + */ 15 + 16 + #ifndef _CEDRUS_H_ 17 + #define _CEDRUS_H_ 18 + 19 + #include <media/v4l2-ctrls.h> 20 + #include <media/v4l2-device.h> 21 + #include <media/v4l2-mem2mem.h> 22 + #include <media/videobuf2-v4l2.h> 23 + #include <media/videobuf2-dma-contig.h> 24 + 25 + #include <linux/platform_device.h> 26 + 27 + #define CEDRUS_NAME "cedrus" 28 + 29 + #define CEDRUS_CAPABILITY_UNTILED BIT(0) 30 + 31 + enum cedrus_codec { 32 + CEDRUS_CODEC_MPEG2, 33 + 34 + CEDRUS_CODEC_LAST, 35 + }; 36 + 37 + enum cedrus_irq_status { 38 + CEDRUS_IRQ_NONE, 39 + CEDRUS_IRQ_ERROR, 40 + CEDRUS_IRQ_OK, 41 + }; 42 + 43 + struct cedrus_control { 44 + u32 id; 45 + u32 elem_size; 46 + enum cedrus_codec codec; 47 + unsigned char required:1; 48 + }; 49 + 50 + struct cedrus_mpeg2_run { 51 + const struct v4l2_ctrl_mpeg2_slice_params *slice_params; 52 + const struct v4l2_ctrl_mpeg2_quantization *quantization; 53 + }; 54 + 55 + struct cedrus_run { 56 + struct vb2_v4l2_buffer *src; 57 + struct vb2_v4l2_buffer *dst; 58 + 59 + union { 60 + struct cedrus_mpeg2_run mpeg2; 61 + }; 62 + }; 63 + 64 + struct cedrus_buffer { 65 + struct v4l2_m2m_buffer m2m_buf; 66 + }; 67 + 68 + struct cedrus_ctx { 69 + struct v4l2_fh fh; 70 + struct cedrus_dev *dev; 71 + 72 + struct v4l2_pix_format src_fmt; 73 + struct v4l2_pix_format dst_fmt; 74 + enum cedrus_codec current_codec; 75 + 76 + struct v4l2_ctrl_handler hdl; 77 + struct v4l2_ctrl **ctrls; 78 + 79 + struct vb2_buffer *dst_bufs[VIDEO_MAX_FRAME]; 80 + }; 81 + 82 + struct cedrus_dec_ops { 83 + void 
(*irq_clear)(struct cedrus_ctx *ctx); 84 + void (*irq_disable)(struct cedrus_ctx *ctx); 85 + enum cedrus_irq_status (*irq_status)(struct cedrus_ctx *ctx); 86 + void (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run); 87 + int (*start)(struct cedrus_ctx *ctx); 88 + void (*stop)(struct cedrus_ctx *ctx); 89 + void (*trigger)(struct cedrus_ctx *ctx); 90 + }; 91 + 92 + struct cedrus_variant { 93 + unsigned int capabilities; 94 + }; 95 + 96 + struct cedrus_dev { 97 + struct v4l2_device v4l2_dev; 98 + struct video_device vfd; 99 + struct media_device mdev; 100 + struct media_pad pad[2]; 101 + struct platform_device *pdev; 102 + struct device *dev; 103 + struct v4l2_m2m_dev *m2m_dev; 104 + struct cedrus_dec_ops *dec_ops[CEDRUS_CODEC_LAST]; 105 + 106 + /* Device file mutex */ 107 + struct mutex dev_mutex; 108 + /* Interrupt spinlock */ 109 + spinlock_t irq_lock; 110 + 111 + void __iomem *base; 112 + 113 + struct clk *mod_clk; 114 + struct clk *ahb_clk; 115 + struct clk *ram_clk; 116 + 117 + struct reset_control *rstc; 118 + 119 + unsigned int capabilities; 120 + }; 121 + 122 + extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2; 123 + 124 + static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val) 125 + { 126 + writel(val, dev->base + reg); 127 + } 128 + 129 + static inline u32 cedrus_read(struct cedrus_dev *dev, u32 reg) 130 + { 131 + return readl(dev->base + reg); 132 + } 133 + 134 + static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf, 135 + struct v4l2_pix_format *pix_fmt, 136 + unsigned int plane) 137 + { 138 + dma_addr_t addr = vb2_dma_contig_plane_dma_addr(buf, 0); 139 + 140 + return addr + (pix_fmt ? (dma_addr_t)pix_fmt->bytesperline * 141 + pix_fmt->height * plane : 0); 142 + } 143 + 144 + static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx, 145 + unsigned int index, 146 + unsigned int plane) 147 + { 148 + struct vb2_buffer *buf = ctx->dst_bufs[index]; 149 + 150 + return buf ? 
cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0; 151 + } 152 + 153 + static inline struct cedrus_buffer * 154 + vb2_v4l2_to_cedrus_buffer(const struct vb2_v4l2_buffer *p) 155 + { 156 + return container_of(p, struct cedrus_buffer, m2m_buf.vb); 157 + } 158 + 159 + static inline struct cedrus_buffer * 160 + vb2_to_cedrus_buffer(const struct vb2_buffer *p) 161 + { 162 + return vb2_v4l2_to_cedrus_buffer(to_vb2_v4l2_buffer(p)); 163 + } 164 + 165 + void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id); 166 + 167 + #endif
+70
drivers/staging/media/sunxi/cedrus/cedrus_dec.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Cedrus VPU driver 4 + * 5 + * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com> 6 + * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com> 7 + * Copyright (C) 2018 Bootlin 8 + * 9 + * Based on the vim2m driver, that is: 10 + * 11 + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 12 + * Pawel Osciak, <pawel@osciak.com> 13 + * Marek Szyprowski, <m.szyprowski@samsung.com> 14 + */ 15 + 16 + #include <media/v4l2-device.h> 17 + #include <media/v4l2-ioctl.h> 18 + #include <media/v4l2-event.h> 19 + #include <media/v4l2-mem2mem.h> 20 + 21 + #include "cedrus.h" 22 + #include "cedrus_dec.h" 23 + #include "cedrus_hw.h" 24 + 25 + void cedrus_device_run(void *priv) 26 + { 27 + struct cedrus_ctx *ctx = priv; 28 + struct cedrus_dev *dev = ctx->dev; 29 + struct cedrus_run run = { 0 }; 30 + struct media_request *src_req; 31 + unsigned long flags; 32 + 33 + run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); 34 + run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); 35 + 36 + /* Apply request(s) controls if needed. */ 37 + src_req = run.src->vb2_buf.req_obj.req; 38 + 39 + if (src_req) 40 + v4l2_ctrl_request_setup(src_req, &ctx->hdl); 41 + 42 + spin_lock_irqsave(&ctx->dev->irq_lock, flags); 43 + 44 + switch (ctx->src_fmt.pixelformat) { 45 + case V4L2_PIX_FMT_MPEG2_SLICE: 46 + run.mpeg2.slice_params = cedrus_find_control_data(ctx, 47 + V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS); 48 + run.mpeg2.quantization = cedrus_find_control_data(ctx, 49 + V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION); 50 + break; 51 + 52 + default: 53 + break; 54 + } 55 + 56 + dev->dec_ops[ctx->current_codec]->setup(ctx, &run); 57 + 58 + spin_unlock_irqrestore(&ctx->dev->irq_lock, flags); 59 + 60 + /* Complete request(s) controls if needed. 
*/ 61 + 62 + if (src_req) 63 + v4l2_ctrl_request_complete(src_req, &ctx->hdl); 64 + 65 + spin_lock_irqsave(&ctx->dev->irq_lock, flags); 66 + 67 + dev->dec_ops[ctx->current_codec]->trigger(ctx); 68 + 69 + spin_unlock_irqrestore(&ctx->dev->irq_lock, flags); 70 + }
+27
drivers/staging/media/sunxi/cedrus/cedrus_dec.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Cedrus VPU driver
 *
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 * Copyright (C) 2018 Bootlin
 *
 * Based on the vim2m driver, that is:
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _CEDRUS_DEC_H_
#define _CEDRUS_DEC_H_

/* ioctl table shared with the video device registration in cedrus.c. */
extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;

void cedrus_device_work(struct work_struct *work);
/* mem2mem device_run hook: programs and triggers one decode run. */
void cedrus_device_run(void *priv);

/* vb2 queue init callback for the mem2mem context (both queues). */
int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq);

#endif
+327
drivers/staging/media/sunxi/cedrus/cedrus_hw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Cedrus VPU driver 4 + * 5 + * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com> 6 + * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com> 7 + * Copyright (C) 2018 Bootlin 8 + * 9 + * Based on the vim2m driver, that is: 10 + * 11 + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 12 + * Pawel Osciak, <pawel@osciak.com> 13 + * Marek Szyprowski, <m.szyprowski@samsung.com> 14 + */ 15 + 16 + #include <linux/platform_device.h> 17 + #include <linux/of_reserved_mem.h> 18 + #include <linux/of_device.h> 19 + #include <linux/dma-mapping.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/clk.h> 22 + #include <linux/regmap.h> 23 + #include <linux/reset.h> 24 + #include <linux/soc/sunxi/sunxi_sram.h> 25 + 26 + #include <media/videobuf2-core.h> 27 + #include <media/v4l2-mem2mem.h> 28 + 29 + #include "cedrus.h" 30 + #include "cedrus_hw.h" 31 + #include "cedrus_regs.h" 32 + 33 + int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec) 34 + { 35 + u32 reg = 0; 36 + 37 + /* 38 + * FIXME: This is only valid on 32-bits DDR's, we should test 39 + * it on the A13/A33. 
40 + */ 41 + reg |= VE_MODE_REC_WR_MODE_2MB; 42 + reg |= VE_MODE_DDR_MODE_BW_128; 43 + 44 + switch (codec) { 45 + case CEDRUS_CODEC_MPEG2: 46 + reg |= VE_MODE_DEC_MPEG; 47 + break; 48 + 49 + default: 50 + return -EINVAL; 51 + } 52 + 53 + cedrus_write(dev, VE_MODE, reg); 54 + 55 + return 0; 56 + } 57 + 58 + void cedrus_engine_disable(struct cedrus_dev *dev) 59 + { 60 + cedrus_write(dev, VE_MODE, VE_MODE_DISABLED); 61 + } 62 + 63 + void cedrus_dst_format_set(struct cedrus_dev *dev, 64 + struct v4l2_pix_format *fmt) 65 + { 66 + unsigned int width = fmt->width; 67 + unsigned int height = fmt->height; 68 + u32 chroma_size; 69 + u32 reg; 70 + 71 + switch (fmt->pixelformat) { 72 + case V4L2_PIX_FMT_NV12: 73 + chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2; 74 + 75 + reg = VE_PRIMARY_OUT_FMT_NV12; 76 + cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg); 77 + 78 + reg = VE_CHROMA_BUF_LEN_SDRT(chroma_size / 2); 79 + cedrus_write(dev, VE_CHROMA_BUF_LEN, reg); 80 + 81 + reg = chroma_size / 2; 82 + cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg); 83 + 84 + reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) | 85 + VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2); 86 + cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg); 87 + 88 + break; 89 + case V4L2_PIX_FMT_SUNXI_TILED_NV12: 90 + default: 91 + reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12; 92 + cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg); 93 + 94 + reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12; 95 + cedrus_write(dev, VE_CHROMA_BUF_LEN, reg); 96 + 97 + break; 98 + } 99 + } 100 + 101 + static irqreturn_t cedrus_bh(int irq, void *data) 102 + { 103 + struct cedrus_dev *dev = data; 104 + struct cedrus_ctx *ctx; 105 + 106 + ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); 107 + if (!ctx) { 108 + v4l2_err(&dev->v4l2_dev, 109 + "Instance released before the end of transaction\n"); 110 + return IRQ_HANDLED; 111 + } 112 + 113 + v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); 114 + 115 + return IRQ_HANDLED; 116 + } 117 + 118 + static 
irqreturn_t cedrus_irq(int irq, void *data) 119 + { 120 + struct cedrus_dev *dev = data; 121 + struct cedrus_ctx *ctx; 122 + struct vb2_v4l2_buffer *src_buf, *dst_buf; 123 + enum vb2_buffer_state state; 124 + enum cedrus_irq_status status; 125 + unsigned long flags; 126 + 127 + spin_lock_irqsave(&dev->irq_lock, flags); 128 + 129 + ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); 130 + if (!ctx) { 131 + v4l2_err(&dev->v4l2_dev, 132 + "Instance released before the end of transaction\n"); 133 + spin_unlock_irqrestore(&dev->irq_lock, flags); 134 + 135 + return IRQ_NONE; 136 + } 137 + 138 + status = dev->dec_ops[ctx->current_codec]->irq_status(ctx); 139 + if (status == CEDRUS_IRQ_NONE) { 140 + spin_unlock_irqrestore(&dev->irq_lock, flags); 141 + return IRQ_NONE; 142 + } 143 + 144 + dev->dec_ops[ctx->current_codec]->irq_disable(ctx); 145 + dev->dec_ops[ctx->current_codec]->irq_clear(ctx); 146 + 147 + src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 148 + dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); 149 + 150 + if (!src_buf || !dst_buf) { 151 + v4l2_err(&dev->v4l2_dev, 152 + "Missing source and/or destination buffers\n"); 153 + spin_unlock_irqrestore(&dev->irq_lock, flags); 154 + 155 + return IRQ_HANDLED; 156 + } 157 + 158 + if (status == CEDRUS_IRQ_ERROR) 159 + state = VB2_BUF_STATE_ERROR; 160 + else 161 + state = VB2_BUF_STATE_DONE; 162 + 163 + v4l2_m2m_buf_done(src_buf, state); 164 + v4l2_m2m_buf_done(dst_buf, state); 165 + 166 + spin_unlock_irqrestore(&dev->irq_lock, flags); 167 + 168 + return IRQ_WAKE_THREAD; 169 + } 170 + 171 + int cedrus_hw_probe(struct cedrus_dev *dev) 172 + { 173 + const struct cedrus_variant *variant; 174 + struct resource *res; 175 + int irq_dec; 176 + int ret; 177 + 178 + variant = of_device_get_match_data(dev->dev); 179 + if (!variant) 180 + return -EINVAL; 181 + 182 + dev->capabilities = variant->capabilities; 183 + 184 + irq_dec = platform_get_irq(dev->pdev, 0); 185 + if (irq_dec <= 0) { 186 + v4l2_err(&dev->v4l2_dev, "Failed to get 
IRQ\n"); 187 + 188 + return irq_dec; 189 + } 190 + ret = devm_request_threaded_irq(dev->dev, irq_dec, cedrus_irq, 191 + cedrus_bh, 0, dev_name(dev->dev), 192 + dev); 193 + if (ret) { 194 + v4l2_err(&dev->v4l2_dev, "Failed to request IRQ\n"); 195 + 196 + return ret; 197 + } 198 + 199 + /* 200 + * The VPU is only able to handle bus addresses so we have to subtract 201 + * the RAM offset to the physcal addresses. 202 + * 203 + * This information will eventually be obtained from device-tree. 204 + */ 205 + 206 + #ifdef PHYS_PFN_OFFSET 207 + dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET; 208 + #endif 209 + 210 + ret = of_reserved_mem_device_init(dev->dev); 211 + if (ret && ret != -ENODEV) { 212 + v4l2_err(&dev->v4l2_dev, "Failed to reserve memory\n"); 213 + 214 + return ret; 215 + } 216 + 217 + ret = sunxi_sram_claim(dev->dev); 218 + if (ret) { 219 + v4l2_err(&dev->v4l2_dev, "Failed to claim SRAM\n"); 220 + 221 + goto err_mem; 222 + } 223 + 224 + dev->ahb_clk = devm_clk_get(dev->dev, "ahb"); 225 + if (IS_ERR(dev->ahb_clk)) { 226 + v4l2_err(&dev->v4l2_dev, "Failed to get AHB clock\n"); 227 + 228 + ret = PTR_ERR(dev->ahb_clk); 229 + goto err_sram; 230 + } 231 + 232 + dev->mod_clk = devm_clk_get(dev->dev, "mod"); 233 + if (IS_ERR(dev->mod_clk)) { 234 + v4l2_err(&dev->v4l2_dev, "Failed to get MOD clock\n"); 235 + 236 + ret = PTR_ERR(dev->mod_clk); 237 + goto err_sram; 238 + } 239 + 240 + dev->ram_clk = devm_clk_get(dev->dev, "ram"); 241 + if (IS_ERR(dev->ram_clk)) { 242 + v4l2_err(&dev->v4l2_dev, "Failed to get RAM clock\n"); 243 + 244 + ret = PTR_ERR(dev->ram_clk); 245 + goto err_sram; 246 + } 247 + 248 + dev->rstc = devm_reset_control_get(dev->dev, NULL); 249 + if (IS_ERR(dev->rstc)) { 250 + v4l2_err(&dev->v4l2_dev, "Failed to get reset control\n"); 251 + 252 + ret = PTR_ERR(dev->rstc); 253 + goto err_sram; 254 + } 255 + 256 + res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0); 257 + dev->base = devm_ioremap_resource(dev->dev, res); 258 + if (!dev->base) { 259 + 
v4l2_err(&dev->v4l2_dev, "Failed to map registers\n"); 260 + 261 + ret = -ENOMEM; 262 + goto err_sram; 263 + } 264 + 265 + ret = clk_set_rate(dev->mod_clk, CEDRUS_CLOCK_RATE_DEFAULT); 266 + if (ret) { 267 + v4l2_err(&dev->v4l2_dev, "Failed to set clock rate\n"); 268 + 269 + goto err_sram; 270 + } 271 + 272 + ret = clk_prepare_enable(dev->ahb_clk); 273 + if (ret) { 274 + v4l2_err(&dev->v4l2_dev, "Failed to enable AHB clock\n"); 275 + 276 + goto err_sram; 277 + } 278 + 279 + ret = clk_prepare_enable(dev->mod_clk); 280 + if (ret) { 281 + v4l2_err(&dev->v4l2_dev, "Failed to enable MOD clock\n"); 282 + 283 + goto err_ahb_clk; 284 + } 285 + 286 + ret = clk_prepare_enable(dev->ram_clk); 287 + if (ret) { 288 + v4l2_err(&dev->v4l2_dev, "Failed to enable RAM clock\n"); 289 + 290 + goto err_mod_clk; 291 + } 292 + 293 + ret = reset_control_reset(dev->rstc); 294 + if (ret) { 295 + v4l2_err(&dev->v4l2_dev, "Failed to apply reset\n"); 296 + 297 + goto err_ram_clk; 298 + } 299 + 300 + return 0; 301 + 302 + err_ram_clk: 303 + clk_disable_unprepare(dev->ram_clk); 304 + err_mod_clk: 305 + clk_disable_unprepare(dev->mod_clk); 306 + err_ahb_clk: 307 + clk_disable_unprepare(dev->ahb_clk); 308 + err_sram: 309 + sunxi_sram_release(dev->dev); 310 + err_mem: 311 + of_reserved_mem_device_release(dev->dev); 312 + 313 + return ret; 314 + } 315 + 316 + void cedrus_hw_remove(struct cedrus_dev *dev) 317 + { 318 + reset_control_assert(dev->rstc); 319 + 320 + clk_disable_unprepare(dev->ram_clk); 321 + clk_disable_unprepare(dev->mod_clk); 322 + clk_disable_unprepare(dev->ahb_clk); 323 + 324 + sunxi_sram_release(dev->dev); 325 + 326 + of_reserved_mem_device_release(dev->dev); 327 + }
+30
drivers/staging/media/sunxi/cedrus/cedrus_hw.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Cedrus VPU driver
 *
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 * Copyright (C) 2018 Bootlin
 *
 * Based on the vim2m driver, that is:
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _CEDRUS_HW_H_
#define _CEDRUS_HW_H_

/* Default VPU module clock rate, in Hz (320 MHz). */
#define CEDRUS_CLOCK_RATE_DEFAULT	320000000

/* Select and enable the engine for @codec; -EINVAL for unsupported codecs. */
int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
void cedrus_engine_disable(struct cedrus_dev *dev);

/* Program the capture (output picture) format registers. */
void cedrus_dst_format_set(struct cedrus_dev *dev,
			   struct v4l2_pix_format *fmt);

/* Bring up / tear down IRQ, reserved memory, SRAM, clocks and reset line. */
int cedrus_hw_probe(struct cedrus_dev *dev);
void cedrus_hw_remove(struct cedrus_dev *dev);

#endif
+246
drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Cedrus VPU driver 4 + * 5 + * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com> 6 + * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com> 7 + * Copyright (C) 2018 Bootlin 8 + */ 9 + 10 + #include <media/videobuf2-dma-contig.h> 11 + 12 + #include "cedrus.h" 13 + #include "cedrus_hw.h" 14 + #include "cedrus_regs.h" 15 + 16 + /* Default MPEG-2 quantization coefficients, from the specification. */ 17 + 18 + static const u8 intra_quantization_matrix_default[64] = { 19 + 8, 16, 16, 19, 16, 19, 22, 22, 20 + 22, 22, 22, 22, 26, 24, 26, 27, 21 + 27, 27, 26, 26, 26, 26, 27, 27, 22 + 27, 29, 29, 29, 34, 34, 34, 29, 23 + 29, 29, 27, 27, 29, 29, 32, 32, 24 + 34, 34, 37, 38, 37, 35, 35, 34, 25 + 35, 38, 38, 40, 40, 40, 48, 48, 26 + 46, 46, 56, 56, 58, 69, 69, 83 27 + }; 28 + 29 + static const u8 non_intra_quantization_matrix_default[64] = { 30 + 16, 16, 16, 16, 16, 16, 16, 16, 31 + 16, 16, 16, 16, 16, 16, 16, 16, 32 + 16, 16, 16, 16, 16, 16, 16, 16, 33 + 16, 16, 16, 16, 16, 16, 16, 16, 34 + 16, 16, 16, 16, 16, 16, 16, 16, 35 + 16, 16, 16, 16, 16, 16, 16, 16, 36 + 16, 16, 16, 16, 16, 16, 16, 16, 37 + 16, 16, 16, 16, 16, 16, 16, 16 38 + }; 39 + 40 + static enum cedrus_irq_status cedrus_mpeg2_irq_status(struct cedrus_ctx *ctx) 41 + { 42 + struct cedrus_dev *dev = ctx->dev; 43 + u32 reg; 44 + 45 + reg = cedrus_read(dev, VE_DEC_MPEG_STATUS); 46 + reg &= VE_DEC_MPEG_STATUS_CHECK_MASK; 47 + 48 + if (!reg) 49 + return CEDRUS_IRQ_NONE; 50 + 51 + if (reg & VE_DEC_MPEG_STATUS_CHECK_ERROR || 52 + !(reg & VE_DEC_MPEG_STATUS_SUCCESS)) 53 + return CEDRUS_IRQ_ERROR; 54 + 55 + return CEDRUS_IRQ_OK; 56 + } 57 + 58 + static void cedrus_mpeg2_irq_clear(struct cedrus_ctx *ctx) 59 + { 60 + struct cedrus_dev *dev = ctx->dev; 61 + 62 + cedrus_write(dev, VE_DEC_MPEG_STATUS, VE_DEC_MPEG_STATUS_CHECK_MASK); 63 + } 64 + 65 + static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx) 66 + { 67 + struct cedrus_dev 
*dev = ctx->dev; 68 + u32 reg = cedrus_read(dev, VE_DEC_MPEG_CTRL); 69 + 70 + reg &= ~VE_DEC_MPEG_CTRL_IRQ_MASK; 71 + 72 + cedrus_write(dev, VE_DEC_MPEG_CTRL, reg); 73 + } 74 + 75 + static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) 76 + { 77 + const struct v4l2_ctrl_mpeg2_slice_params *slice_params; 78 + const struct v4l2_mpeg2_sequence *sequence; 79 + const struct v4l2_mpeg2_picture *picture; 80 + const struct v4l2_ctrl_mpeg2_quantization *quantization; 81 + dma_addr_t src_buf_addr, dst_luma_addr, dst_chroma_addr; 82 + dma_addr_t fwd_luma_addr, fwd_chroma_addr; 83 + dma_addr_t bwd_luma_addr, bwd_chroma_addr; 84 + struct cedrus_dev *dev = ctx->dev; 85 + const u8 *matrix; 86 + unsigned int i; 87 + u32 reg; 88 + 89 + slice_params = run->mpeg2.slice_params; 90 + sequence = &slice_params->sequence; 91 + picture = &slice_params->picture; 92 + 93 + quantization = run->mpeg2.quantization; 94 + 95 + /* Activate MPEG engine. */ 96 + cedrus_engine_enable(dev, CEDRUS_CODEC_MPEG2); 97 + 98 + /* Set intra quantization matrix. */ 99 + 100 + if (quantization && quantization->load_intra_quantiser_matrix) 101 + matrix = quantization->intra_quantiser_matrix; 102 + else 103 + matrix = intra_quantization_matrix_default; 104 + 105 + for (i = 0; i < 64; i++) { 106 + reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]); 107 + reg |= VE_DEC_MPEG_IQMINPUT_FLAG_INTRA; 108 + 109 + cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg); 110 + } 111 + 112 + /* Set non-intra quantization matrix. */ 113 + 114 + if (quantization && quantization->load_non_intra_quantiser_matrix) 115 + matrix = quantization->non_intra_quantiser_matrix; 116 + else 117 + matrix = non_intra_quantization_matrix_default; 118 + 119 + for (i = 0; i < 64; i++) { 120 + reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]); 121 + reg |= VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA; 122 + 123 + cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg); 124 + } 125 + 126 + /* Set MPEG picture header. 
*/ 127 + 128 + reg = VE_DEC_MPEG_MP12HDR_SLICE_TYPE(picture->picture_coding_type); 129 + reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 0, picture->f_code[0][0]); 130 + reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 1, picture->f_code[0][1]); 131 + reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 0, picture->f_code[1][0]); 132 + reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 1, picture->f_code[1][1]); 133 + reg |= VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(picture->intra_dc_precision); 134 + reg |= VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(picture->picture_structure); 135 + reg |= VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(picture->top_field_first); 136 + reg |= VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(picture->frame_pred_frame_dct); 137 + reg |= VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(picture->concealment_motion_vectors); 138 + reg |= VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(picture->q_scale_type); 139 + reg |= VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(picture->intra_vlc_format); 140 + reg |= VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(picture->alternate_scan); 141 + reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(0); 142 + reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(0); 143 + 144 + cedrus_write(dev, VE_DEC_MPEG_MP12HDR, reg); 145 + 146 + /* Set frame dimensions. */ 147 + 148 + reg = VE_DEC_MPEG_PICCODEDSIZE_WIDTH(sequence->horizontal_size); 149 + reg |= VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(sequence->vertical_size); 150 + 151 + cedrus_write(dev, VE_DEC_MPEG_PICCODEDSIZE, reg); 152 + 153 + reg = VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(ctx->src_fmt.width); 154 + reg |= VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(ctx->src_fmt.height); 155 + 156 + cedrus_write(dev, VE_DEC_MPEG_PICBOUNDSIZE, reg); 157 + 158 + /* Forward and backward prediction reference buffers. 
*/ 159 + 160 + fwd_luma_addr = cedrus_dst_buf_addr(ctx, 161 + slice_params->forward_ref_index, 162 + 0); 163 + fwd_chroma_addr = cedrus_dst_buf_addr(ctx, 164 + slice_params->forward_ref_index, 165 + 1); 166 + 167 + cedrus_write(dev, VE_DEC_MPEG_FWD_REF_LUMA_ADDR, fwd_luma_addr); 168 + cedrus_write(dev, VE_DEC_MPEG_FWD_REF_CHROMA_ADDR, fwd_chroma_addr); 169 + 170 + bwd_luma_addr = cedrus_dst_buf_addr(ctx, 171 + slice_params->backward_ref_index, 172 + 0); 173 + bwd_chroma_addr = cedrus_dst_buf_addr(ctx, 174 + slice_params->backward_ref_index, 175 + 1); 176 + 177 + cedrus_write(dev, VE_DEC_MPEG_BWD_REF_LUMA_ADDR, bwd_luma_addr); 178 + cedrus_write(dev, VE_DEC_MPEG_BWD_REF_CHROMA_ADDR, bwd_chroma_addr); 179 + 180 + /* Destination luma and chroma buffers. */ 181 + 182 + dst_luma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 0); 183 + dst_chroma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 1); 184 + 185 + cedrus_write(dev, VE_DEC_MPEG_REC_LUMA, dst_luma_addr); 186 + cedrus_write(dev, VE_DEC_MPEG_REC_CHROMA, dst_chroma_addr); 187 + 188 + /* Source offset and length in bits. */ 189 + 190 + cedrus_write(dev, VE_DEC_MPEG_VLD_OFFSET, 191 + slice_params->data_bit_offset); 192 + 193 + reg = slice_params->bit_size - slice_params->data_bit_offset; 194 + cedrus_write(dev, VE_DEC_MPEG_VLD_LEN, reg); 195 + 196 + /* Source beginning and end addresses. */ 197 + 198 + src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0); 199 + 200 + reg = VE_DEC_MPEG_VLD_ADDR_BASE(src_buf_addr); 201 + reg |= VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA; 202 + reg |= VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA; 203 + reg |= VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA; 204 + 205 + cedrus_write(dev, VE_DEC_MPEG_VLD_ADDR, reg); 206 + 207 + reg = src_buf_addr + DIV_ROUND_UP(slice_params->bit_size, 8); 208 + cedrus_write(dev, VE_DEC_MPEG_VLD_END_ADDR, reg); 209 + 210 + /* Macroblock address: start at the beginning. 
*/ 211 + reg = VE_DEC_MPEG_MBADDR_Y(0) | VE_DEC_MPEG_MBADDR_X(0); 212 + cedrus_write(dev, VE_DEC_MPEG_MBADDR, reg); 213 + 214 + /* Clear previous errors. */ 215 + cedrus_write(dev, VE_DEC_MPEG_ERROR, 0); 216 + 217 + /* Clear correct macroblocks register. */ 218 + cedrus_write(dev, VE_DEC_MPEG_CRTMBADDR, 0); 219 + 220 + /* Enable appropriate interruptions and components. */ 221 + 222 + reg = VE_DEC_MPEG_CTRL_IRQ_MASK | VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK | 223 + VE_DEC_MPEG_CTRL_MC_CACHE_EN; 224 + 225 + cedrus_write(dev, VE_DEC_MPEG_CTRL, reg); 226 + } 227 + 228 + static void cedrus_mpeg2_trigger(struct cedrus_ctx *ctx) 229 + { 230 + struct cedrus_dev *dev = ctx->dev; 231 + u32 reg; 232 + 233 + /* Trigger MPEG engine. */ 234 + reg = VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD | VE_DEC_MPEG_TRIGGER_MPEG2 | 235 + VE_DEC_MPEG_TRIGGER_MB_BOUNDARY; 236 + 237 + cedrus_write(dev, VE_DEC_MPEG_TRIGGER, reg); 238 + } 239 + 240 + struct cedrus_dec_ops cedrus_dec_ops_mpeg2 = { 241 + .irq_clear = cedrus_mpeg2_irq_clear, 242 + .irq_disable = cedrus_mpeg2_irq_disable, 243 + .irq_status = cedrus_mpeg2_irq_status, 244 + .setup = cedrus_mpeg2_setup, 245 + .trigger = cedrus_mpeg2_trigger, 246 + };
+235
drivers/staging/media/sunxi/cedrus/cedrus_regs.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Cedrus VPU driver
 *
 * Copyright (c) 2013-2016 Jens Kuske <jenskuske@gmail.com>
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 */

#ifndef _CEDRUS_REGS_H_
#define _CEDRUS_REGS_H_

/*
 * Common acronyms and contractions used in register descriptions:
 * * VLD : Variable-Length Decoder
 * * IQ: Inverse Quantization
 * * IDCT: Inverse Discrete Cosine Transform
 * * MC: Motion Compensation
 * * STCD: Start Code Detect
 * * SDRT: Scale Down and Rotate
 */

/* Base offsets of the per-codec decoder register banks. */
#define VE_ENGINE_DEC_MPEG			0x100
#define VE_ENGINE_DEC_H264			0x200

#define VE_MODE					0x00

#define VE_MODE_REC_WR_MODE_2MB			(0x01 << 20)
#define VE_MODE_REC_WR_MODE_1MB			(0x00 << 20)
#define VE_MODE_DDR_MODE_BW_128			(0x03 << 16)
#define VE_MODE_DDR_MODE_BW_256			(0x02 << 16)
#define VE_MODE_DISABLED			(0x07 << 0)
#define VE_MODE_DEC_H265			(0x04 << 0)
#define VE_MODE_DEC_H264			(0x01 << 0)
#define VE_MODE_DEC_MPEG			(0x00 << 0)

#define VE_PRIMARY_CHROMA_BUF_LEN		0xc4
#define VE_PRIMARY_FB_LINE_STRIDE		0xc8

#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s)	(((s) << 16) & GENMASK(31, 16))
#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s)	(((s) << 0) & GENMASK(15, 0))

#define VE_CHROMA_BUF_LEN			0xe8

#define VE_SECONDARY_OUT_FMT_TILED_32_NV12	(0x00 << 30)
#define VE_SECONDARY_OUT_FMT_EXT		(0x01 << 30)
#define VE_SECONDARY_OUT_FMT_YU12		(0x02 << 30)
#define VE_SECONDARY_OUT_FMT_YV12		(0x03 << 30)
#define VE_CHROMA_BUF_LEN_SDRT(l)		((l) & GENMASK(27, 0))

#define VE_PRIMARY_OUT_FMT			0xec

#define VE_PRIMARY_OUT_FMT_TILED_32_NV12	(0x00 << 4)
#define VE_PRIMARY_OUT_FMT_TILED_128_NV12	(0x01 << 4)
#define VE_PRIMARY_OUT_FMT_YU12			(0x02 << 4)
#define VE_PRIMARY_OUT_FMT_YV12			(0x03 << 4)
#define VE_PRIMARY_OUT_FMT_NV12			(0x04 << 4)
#define VE_PRIMARY_OUT_FMT_NV21			(0x05 << 4)
#define VE_SECONDARY_OUT_FMT_EXT_TILED_32_NV12	(0x00 << 0)
#define VE_SECONDARY_OUT_FMT_EXT_TILED_128_NV12	(0x01 << 0)
#define VE_SECONDARY_OUT_FMT_EXT_YU12		(0x02 << 0)
#define VE_SECONDARY_OUT_FMT_EXT_YV12		(0x03 << 0)
#define VE_SECONDARY_OUT_FMT_EXT_NV12		(0x04 << 0)
#define VE_SECONDARY_OUT_FMT_EXT_NV21		(0x05 << 0)

#define VE_VERSION				0xf0

#define VE_VERSION_SHIFT			16

/* MPEG engine: picture header fields packed into one register. */
#define VE_DEC_MPEG_MP12HDR			(VE_ENGINE_DEC_MPEG + 0x00)

#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t)	(((t) << 28) & GENMASK(30, 28))
#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y)	(24 - 4 * (y) - 8 * (x))
#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
	(((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))

#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
	(((p) << 10) & GENMASK(11, 10))
#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
	(((s) << 8) & GENMASK(9, 8))
#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
	((v) ? BIT(7) : 0)
#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
	((v) ? BIT(6) : 0)
#define VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(v) \
	((v) ? BIT(5) : 0)
#define VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(v) \
	((v) ? BIT(4) : 0)
#define VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(v) \
	((v) ? BIT(3) : 0)
#define VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(v) \
	((v) ? BIT(2) : 0)
#define VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(v) \
	((v) ? BIT(1) : 0)
#define VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(v) \
	((v) ? BIT(0) : 0)

#define VE_DEC_MPEG_PICCODEDSIZE		(VE_ENGINE_DEC_MPEG + 0x08)

/* Coded size is expressed in 16x16 macroblock units. */
#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
	((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
	((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))

#define VE_DEC_MPEG_PICBOUNDSIZE		(VE_ENGINE_DEC_MPEG + 0x0c)

#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w)	(((w) << 16) & GENMASK(27, 16))
#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h)	(((h) << 0) & GENMASK(11, 0))

#define VE_DEC_MPEG_MBADDR			(VE_ENGINE_DEC_MPEG + 0x10)

#define VE_DEC_MPEG_MBADDR_X(w)			(((w) << 8) & GENMASK(15, 8))
/*
 * Fix: GENMASK(h, l) takes the high bit first; the original GENMASK(0, 7)
 * had the arguments swapped, which does not describe bits 7:0. Corrected to
 * match VE_DEC_MPEG_MBADDR_X/PICCODEDSIZE_HEIGHT above.
 */
#define VE_DEC_MPEG_MBADDR_Y(h)			(((h) << 0) & GENMASK(7, 0))

#define VE_DEC_MPEG_CTRL			(VE_ENGINE_DEC_MPEG + 0x14)

#define VE_DEC_MPEG_CTRL_MC_CACHE_EN		BIT(31)
#define VE_DEC_MPEG_CTRL_SW_VLD			BIT(27)
#define VE_DEC_MPEG_CTRL_SW_IQ_IS		BIT(17)
#define VE_DEC_MPEG_CTRL_QP_AC_DC_OUT_EN	BIT(14)
#define VE_DEC_MPEG_CTRL_ROTATE_SCALE_OUT_EN	BIT(8)
#define VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK	BIT(7)
#define VE_DEC_MPEG_CTRL_ROTATE_IRQ_EN		BIT(6)
#define VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN	BIT(5)
#define VE_DEC_MPEG_CTRL_ERROR_IRQ_EN		BIT(4)
#define VE_DEC_MPEG_CTRL_FINISH_IRQ_EN		BIT(3)
#define VE_DEC_MPEG_CTRL_IRQ_MASK \
	(VE_DEC_MPEG_CTRL_FINISH_IRQ_EN | VE_DEC_MPEG_CTRL_ERROR_IRQ_EN | \
	 VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN)

#define VE_DEC_MPEG_TRIGGER			(VE_ENGINE_DEC_MPEG + 0x18)

#define VE_DEC_MPEG_TRIGGER_MB_BOUNDARY		BIT(31)

#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_420	(0x00 << 27)
#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_411	(0x01 << 27)
#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422	(0x02 << 27)
#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_444	(0x03 << 27)
#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422T	(0x04 << 27)

#define VE_DEC_MPEG_TRIGGER_MPEG1		(0x01 << 24)
#define VE_DEC_MPEG_TRIGGER_MPEG2		(0x02 << 24)
#define VE_DEC_MPEG_TRIGGER_JPEG		(0x03 << 24)
#define VE_DEC_MPEG_TRIGGER_MPEG4		(0x04 << 24)
#define VE_DEC_MPEG_TRIGGER_VP62		(0x05 << 24)

#define VE_DEC_MPEG_TRIGGER_VP62_AC_GET_BITS	BIT(7)

#define VE_DEC_MPEG_TRIGGER_STCD_VC1		(0x02 << 4)
#define VE_DEC_MPEG_TRIGGER_STCD_MPEG2		(0x01 << 4)
#define VE_DEC_MPEG_TRIGGER_STCD_AVC		(0x00 << 4)

#define VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD		(0x0f << 0)
#define VE_DEC_MPEG_TRIGGER_HW_JPEG_VLD		(0x0e << 0)
#define VE_DEC_MPEG_TRIGGER_HW_MB		(0x0d << 0)
#define VE_DEC_MPEG_TRIGGER_HW_ROTATE		(0x0c << 0)
#define VE_DEC_MPEG_TRIGGER_HW_VP6_VLD		(0x0b << 0)
#define VE_DEC_MPEG_TRIGGER_HW_MAF		(0x0a << 0)
#define VE_DEC_MPEG_TRIGGER_HW_STCD_END		(0x09 << 0)
#define VE_DEC_MPEG_TRIGGER_HW_STCD_BEGIN	(0x08 << 0)
#define VE_DEC_MPEG_TRIGGER_SW_MC		(0x07 << 0)
#define VE_DEC_MPEG_TRIGGER_SW_IQ		(0x06 << 0)
#define VE_DEC_MPEG_TRIGGER_SW_IDCT		(0x05 << 0)
#define VE_DEC_MPEG_TRIGGER_SW_SCALE		(0x04 << 0)
#define VE_DEC_MPEG_TRIGGER_SW_VP6		(0x03 << 0)
#define VE_DEC_MPEG_TRIGGER_SW_VP62_AC_GET_BITS	(0x02 << 0)

#define VE_DEC_MPEG_STATUS			(VE_ENGINE_DEC_MPEG + 0x1c)

#define VE_DEC_MPEG_STATUS_START_DETECT_BUSY	BIT(27)
#define VE_DEC_MPEG_STATUS_VP6_BIT		BIT(26)
#define VE_DEC_MPEG_STATUS_VP6_BIT_BUSY		BIT(25)
#define VE_DEC_MPEG_STATUS_MAF_BUSY		BIT(23)
#define VE_DEC_MPEG_STATUS_VP6_MVP_BUSY		BIT(22)
#define VE_DEC_MPEG_STATUS_JPEG_BIT_END		BIT(21)
#define VE_DEC_MPEG_STATUS_JPEG_RESTART_ERROR	BIT(20)
#define VE_DEC_MPEG_STATUS_JPEG_MARKER		BIT(19)
#define VE_DEC_MPEG_STATUS_ROTATE_BUSY		BIT(18)
#define VE_DEC_MPEG_STATUS_DEBLOCKING_BUSY	BIT(17)
#define VE_DEC_MPEG_STATUS_SCALE_DOWN_BUSY	BIT(16)
#define VE_DEC_MPEG_STATUS_IQIS_BUF_EMPTY	BIT(15)
#define VE_DEC_MPEG_STATUS_IDCT_BUF_EMPTY	BIT(14)
#define VE_DEC_MPEG_STATUS_VE_BUSY		BIT(13)
#define VE_DEC_MPEG_STATUS_MC_BUSY		BIT(12)
#define VE_DEC_MPEG_STATUS_IDCT_BUSY		BIT(11)
#define VE_DEC_MPEG_STATUS_IQIS_BUSY		BIT(10)
#define VE_DEC_MPEG_STATUS_DCAC_BUSY		BIT(9)
#define VE_DEC_MPEG_STATUS_VLD_BUSY		BIT(8)
#define VE_DEC_MPEG_STATUS_ROTATE_SUCCESS	BIT(3)
#define VE_DEC_MPEG_STATUS_VLD_DATA_REQ		BIT(2)
#define VE_DEC_MPEG_STATUS_ERROR		BIT(1)
#define VE_DEC_MPEG_STATUS_SUCCESS		BIT(0)
#define VE_DEC_MPEG_STATUS_CHECK_MASK \
	(VE_DEC_MPEG_STATUS_SUCCESS | VE_DEC_MPEG_STATUS_ERROR | \
	 VE_DEC_MPEG_STATUS_VLD_DATA_REQ)
#define VE_DEC_MPEG_STATUS_CHECK_ERROR \
	(VE_DEC_MPEG_STATUS_ERROR | VE_DEC_MPEG_STATUS_VLD_DATA_REQ)

#define VE_DEC_MPEG_VLD_ADDR			(VE_ENGINE_DEC_MPEG + 0x28)

#define VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA	BIT(30)
#define VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA	BIT(29)
#define VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA	BIT(28)
/*
 * The hardware splits the bitstream base address: bits 27:4 are kept in
 * place and the top nibble (31:28) is folded into the low bits 3:0.
 */
#define VE_DEC_MPEG_VLD_ADDR_BASE(a) \
	({ \
		u32 _tmp = (a); \
		u32 _lo = _tmp & GENMASK(27, 4); \
		u32 _hi = (_tmp >> 28) & GENMASK(3, 0); \
		(_lo | _hi); \
	})

#define VE_DEC_MPEG_VLD_OFFSET			(VE_ENGINE_DEC_MPEG + 0x2c)
#define VE_DEC_MPEG_VLD_LEN			(VE_ENGINE_DEC_MPEG + 0x30)
#define VE_DEC_MPEG_VLD_END_ADDR		(VE_ENGINE_DEC_MPEG + 0x34)

#define VE_DEC_MPEG_REC_LUMA			(VE_ENGINE_DEC_MPEG + 0x48)
#define VE_DEC_MPEG_REC_CHROMA			(VE_ENGINE_DEC_MPEG + 0x4c)
#define VE_DEC_MPEG_FWD_REF_LUMA_ADDR		(VE_ENGINE_DEC_MPEG + 0x50)
#define VE_DEC_MPEG_FWD_REF_CHROMA_ADDR		(VE_ENGINE_DEC_MPEG + 0x54)
#define VE_DEC_MPEG_BWD_REF_LUMA_ADDR		(VE_ENGINE_DEC_MPEG + 0x58)
#define VE_DEC_MPEG_BWD_REF_CHROMA_ADDR		(VE_ENGINE_DEC_MPEG + 0x5c)

#define VE_DEC_MPEG_IQMINPUT			(VE_ENGINE_DEC_MPEG + 0x80)

#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA		(0x01 << 14)
#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA	(0x00 << 14)
#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
	(((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))

#define VE_DEC_MPEG_ERROR			(VE_ENGINE_DEC_MPEG + 0xc4)
#define VE_DEC_MPEG_CRTMBADDR			(VE_ENGINE_DEC_MPEG + 0xc8)
#define VE_DEC_MPEG_ROT_LUMA			(VE_ENGINE_DEC_MPEG + 0xcc)
#define VE_DEC_MPEG_ROT_CHROMA			(VE_ENGINE_DEC_MPEG + 0xd0)

#endif
+542
drivers/staging/media/sunxi/cedrus/cedrus_video.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Cedrus VPU driver 4 + * 5 + * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com> 6 + * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com> 7 + * Copyright (C) 2018 Bootlin 8 + * 9 + * Based on the vim2m driver, that is: 10 + * 11 + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 12 + * Pawel Osciak, <pawel@osciak.com> 13 + * Marek Szyprowski, <m.szyprowski@samsung.com> 14 + */ 15 + 16 + #include <media/videobuf2-dma-contig.h> 17 + #include <media/v4l2-device.h> 18 + #include <media/v4l2-ioctl.h> 19 + #include <media/v4l2-event.h> 20 + #include <media/v4l2-mem2mem.h> 21 + 22 + #include "cedrus.h" 23 + #include "cedrus_video.h" 24 + #include "cedrus_dec.h" 25 + #include "cedrus_hw.h" 26 + 27 + #define CEDRUS_DECODE_SRC BIT(0) 28 + #define CEDRUS_DECODE_DST BIT(1) 29 + 30 + #define CEDRUS_MIN_WIDTH 16U 31 + #define CEDRUS_MIN_HEIGHT 16U 32 + #define CEDRUS_MAX_WIDTH 3840U 33 + #define CEDRUS_MAX_HEIGHT 2160U 34 + 35 + static struct cedrus_format cedrus_formats[] = { 36 + { 37 + .pixelformat = V4L2_PIX_FMT_MPEG2_SLICE, 38 + .directions = CEDRUS_DECODE_SRC, 39 + }, 40 + { 41 + .pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12, 42 + .directions = CEDRUS_DECODE_DST, 43 + }, 44 + { 45 + .pixelformat = V4L2_PIX_FMT_NV12, 46 + .directions = CEDRUS_DECODE_DST, 47 + .capabilities = CEDRUS_CAPABILITY_UNTILED, 48 + }, 49 + }; 50 + 51 + #define CEDRUS_FORMATS_COUNT ARRAY_SIZE(cedrus_formats) 52 + 53 + static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file) 54 + { 55 + return container_of(file->private_data, struct cedrus_ctx, fh); 56 + } 57 + 58 + static struct cedrus_format *cedrus_find_format(u32 pixelformat, u32 directions, 59 + unsigned int capabilities) 60 + { 61 + struct cedrus_format *fmt; 62 + unsigned int i; 63 + 64 + for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) { 65 + fmt = &cedrus_formats[i]; 66 + 67 + if (fmt->capabilities && (fmt->capabilities & capabilities) != 
68 + fmt->capabilities) 69 + continue; 70 + 71 + if (fmt->pixelformat == pixelformat && 72 + (fmt->directions & directions) != 0) 73 + break; 74 + } 75 + 76 + if (i == CEDRUS_FORMATS_COUNT) 77 + return NULL; 78 + 79 + return &cedrus_formats[i]; 80 + } 81 + 82 + static bool cedrus_check_format(u32 pixelformat, u32 directions, 83 + unsigned int capabilities) 84 + { 85 + return cedrus_find_format(pixelformat, directions, capabilities); 86 + } 87 + 88 + static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt) 89 + { 90 + unsigned int width = pix_fmt->width; 91 + unsigned int height = pix_fmt->height; 92 + unsigned int sizeimage = pix_fmt->sizeimage; 93 + unsigned int bytesperline = pix_fmt->bytesperline; 94 + 95 + pix_fmt->field = V4L2_FIELD_NONE; 96 + 97 + /* Limit to hardware min/max. */ 98 + width = clamp(width, CEDRUS_MIN_WIDTH, CEDRUS_MAX_WIDTH); 99 + height = clamp(height, CEDRUS_MIN_HEIGHT, CEDRUS_MAX_HEIGHT); 100 + 101 + switch (pix_fmt->pixelformat) { 102 + case V4L2_PIX_FMT_MPEG2_SLICE: 103 + /* Zero bytes per line for encoded source. */ 104 + bytesperline = 0; 105 + 106 + break; 107 + 108 + case V4L2_PIX_FMT_SUNXI_TILED_NV12: 109 + /* 32-aligned stride. */ 110 + bytesperline = ALIGN(width, 32); 111 + 112 + /* 32-aligned height. */ 113 + height = ALIGN(height, 32); 114 + 115 + /* Luma plane size. */ 116 + sizeimage = bytesperline * height; 117 + 118 + /* Chroma plane size. */ 119 + sizeimage += bytesperline * height / 2; 120 + 121 + break; 122 + 123 + case V4L2_PIX_FMT_NV12: 124 + /* 16-aligned stride. */ 125 + bytesperline = ALIGN(width, 16); 126 + 127 + /* 16-aligned height. */ 128 + height = ALIGN(height, 16); 129 + 130 + /* Luma plane size. */ 131 + sizeimage = bytesperline * height; 132 + 133 + /* Chroma plane size. 
*/ 134 + sizeimage += bytesperline * height / 2; 135 + 136 + break; 137 + } 138 + 139 + pix_fmt->width = width; 140 + pix_fmt->height = height; 141 + 142 + pix_fmt->bytesperline = bytesperline; 143 + pix_fmt->sizeimage = sizeimage; 144 + } 145 + 146 + static int cedrus_querycap(struct file *file, void *priv, 147 + struct v4l2_capability *cap) 148 + { 149 + strscpy(cap->driver, CEDRUS_NAME, sizeof(cap->driver)); 150 + strscpy(cap->card, CEDRUS_NAME, sizeof(cap->card)); 151 + snprintf(cap->bus_info, sizeof(cap->bus_info), 152 + "platform:%s", CEDRUS_NAME); 153 + 154 + return 0; 155 + } 156 + 157 + static int cedrus_enum_fmt(struct file *file, struct v4l2_fmtdesc *f, 158 + u32 direction) 159 + { 160 + struct cedrus_ctx *ctx = cedrus_file2ctx(file); 161 + struct cedrus_dev *dev = ctx->dev; 162 + unsigned int capabilities = dev->capabilities; 163 + struct cedrus_format *fmt; 164 + unsigned int i, index; 165 + 166 + /* Index among formats that match the requested direction. */ 167 + index = 0; 168 + 169 + for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) { 170 + fmt = &cedrus_formats[i]; 171 + 172 + if (fmt->capabilities && (fmt->capabilities & capabilities) != 173 + fmt->capabilities) 174 + continue; 175 + 176 + if (!(cedrus_formats[i].directions & direction)) 177 + continue; 178 + 179 + if (index == f->index) 180 + break; 181 + 182 + index++; 183 + } 184 + 185 + /* Matched format. 
*/ 186 + if (i < CEDRUS_FORMATS_COUNT) { 187 + f->pixelformat = cedrus_formats[i].pixelformat; 188 + 189 + return 0; 190 + } 191 + 192 + return -EINVAL; 193 + } 194 + 195 + static int cedrus_enum_fmt_vid_cap(struct file *file, void *priv, 196 + struct v4l2_fmtdesc *f) 197 + { 198 + return cedrus_enum_fmt(file, f, CEDRUS_DECODE_DST); 199 + } 200 + 201 + static int cedrus_enum_fmt_vid_out(struct file *file, void *priv, 202 + struct v4l2_fmtdesc *f) 203 + { 204 + return cedrus_enum_fmt(file, f, CEDRUS_DECODE_SRC); 205 + } 206 + 207 + static int cedrus_g_fmt_vid_cap(struct file *file, void *priv, 208 + struct v4l2_format *f) 209 + { 210 + struct cedrus_ctx *ctx = cedrus_file2ctx(file); 211 + 212 + /* Fall back to dummy default by lack of hardware configuration. */ 213 + if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) { 214 + f->fmt.pix.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12; 215 + cedrus_prepare_format(&f->fmt.pix); 216 + 217 + return 0; 218 + } 219 + 220 + f->fmt.pix = ctx->dst_fmt; 221 + 222 + return 0; 223 + } 224 + 225 + static int cedrus_g_fmt_vid_out(struct file *file, void *priv, 226 + struct v4l2_format *f) 227 + { 228 + struct cedrus_ctx *ctx = cedrus_file2ctx(file); 229 + 230 + /* Fall back to dummy default by lack of hardware configuration. 
*/ 231 + if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) { 232 + f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE; 233 + f->fmt.pix.sizeimage = SZ_1K; 234 + cedrus_prepare_format(&f->fmt.pix); 235 + 236 + return 0; 237 + } 238 + 239 + f->fmt.pix = ctx->src_fmt; 240 + 241 + return 0; 242 + } 243 + 244 + static int cedrus_try_fmt_vid_cap(struct file *file, void *priv, 245 + struct v4l2_format *f) 246 + { 247 + struct cedrus_ctx *ctx = cedrus_file2ctx(file); 248 + struct cedrus_dev *dev = ctx->dev; 249 + struct v4l2_pix_format *pix_fmt = &f->fmt.pix; 250 + 251 + if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST, 252 + dev->capabilities)) 253 + return -EINVAL; 254 + 255 + cedrus_prepare_format(pix_fmt); 256 + 257 + return 0; 258 + } 259 + 260 + static int cedrus_try_fmt_vid_out(struct file *file, void *priv, 261 + struct v4l2_format *f) 262 + { 263 + struct cedrus_ctx *ctx = cedrus_file2ctx(file); 264 + struct cedrus_dev *dev = ctx->dev; 265 + struct v4l2_pix_format *pix_fmt = &f->fmt.pix; 266 + 267 + if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC, 268 + dev->capabilities)) 269 + return -EINVAL; 270 + 271 + /* Source image size has to be provided by userspace. 
*/ 272 + if (pix_fmt->sizeimage == 0) 273 + return -EINVAL; 274 + 275 + cedrus_prepare_format(pix_fmt); 276 + 277 + return 0; 278 + } 279 + 280 + static int cedrus_s_fmt_vid_cap(struct file *file, void *priv, 281 + struct v4l2_format *f) 282 + { 283 + struct cedrus_ctx *ctx = cedrus_file2ctx(file); 284 + struct cedrus_dev *dev = ctx->dev; 285 + int ret; 286 + 287 + ret = cedrus_try_fmt_vid_cap(file, priv, f); 288 + if (ret) 289 + return ret; 290 + 291 + ctx->dst_fmt = f->fmt.pix; 292 + 293 + cedrus_dst_format_set(dev, &ctx->dst_fmt); 294 + 295 + return 0; 296 + } 297 + 298 + static int cedrus_s_fmt_vid_out(struct file *file, void *priv, 299 + struct v4l2_format *f) 300 + { 301 + struct cedrus_ctx *ctx = cedrus_file2ctx(file); 302 + int ret; 303 + 304 + ret = cedrus_try_fmt_vid_out(file, priv, f); 305 + if (ret) 306 + return ret; 307 + 308 + ctx->src_fmt = f->fmt.pix; 309 + 310 + /* Propagate colorspace information to capture. */ 311 + ctx->dst_fmt.colorspace = f->fmt.pix.colorspace; 312 + ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func; 313 + ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc; 314 + ctx->dst_fmt.quantization = f->fmt.pix.quantization; 315 + 316 + return 0; 317 + } 318 + 319 + const struct v4l2_ioctl_ops cedrus_ioctl_ops = { 320 + .vidioc_querycap = cedrus_querycap, 321 + 322 + .vidioc_enum_fmt_vid_cap = cedrus_enum_fmt_vid_cap, 323 + .vidioc_g_fmt_vid_cap = cedrus_g_fmt_vid_cap, 324 + .vidioc_try_fmt_vid_cap = cedrus_try_fmt_vid_cap, 325 + .vidioc_s_fmt_vid_cap = cedrus_s_fmt_vid_cap, 326 + 327 + .vidioc_enum_fmt_vid_out = cedrus_enum_fmt_vid_out, 328 + .vidioc_g_fmt_vid_out = cedrus_g_fmt_vid_out, 329 + .vidioc_try_fmt_vid_out = cedrus_try_fmt_vid_out, 330 + .vidioc_s_fmt_vid_out = cedrus_s_fmt_vid_out, 331 + 332 + .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, 333 + .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, 334 + .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, 335 + .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, 336 + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, 337 + 
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, 338 + .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, 339 + 340 + .vidioc_streamon = v4l2_m2m_ioctl_streamon, 341 + .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, 342 + 343 + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, 344 + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, 345 + }; 346 + 347 + static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs, 348 + unsigned int *nplanes, unsigned int sizes[], 349 + struct device *alloc_devs[]) 350 + { 351 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); 352 + struct cedrus_dev *dev = ctx->dev; 353 + struct v4l2_pix_format *pix_fmt; 354 + u32 directions; 355 + 356 + if (V4L2_TYPE_IS_OUTPUT(vq->type)) { 357 + directions = CEDRUS_DECODE_SRC; 358 + pix_fmt = &ctx->src_fmt; 359 + } else { 360 + directions = CEDRUS_DECODE_DST; 361 + pix_fmt = &ctx->dst_fmt; 362 + } 363 + 364 + if (!cedrus_check_format(pix_fmt->pixelformat, directions, 365 + dev->capabilities)) 366 + return -EINVAL; 367 + 368 + if (*nplanes) { 369 + if (sizes[0] < pix_fmt->sizeimage) 370 + return -EINVAL; 371 + } else { 372 + sizes[0] = pix_fmt->sizeimage; 373 + *nplanes = 1; 374 + } 375 + 376 + return 0; 377 + } 378 + 379 + static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state) 380 + { 381 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); 382 + struct vb2_v4l2_buffer *vbuf; 383 + unsigned long flags; 384 + 385 + for (;;) { 386 + spin_lock_irqsave(&ctx->dev->irq_lock, flags); 387 + 388 + if (V4L2_TYPE_IS_OUTPUT(vq->type)) 389 + vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 390 + else 391 + vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); 392 + 393 + spin_unlock_irqrestore(&ctx->dev->irq_lock, flags); 394 + 395 + if (!vbuf) 396 + return; 397 + 398 + v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, 399 + &ctx->hdl); 400 + v4l2_m2m_buf_done(vbuf, state); 401 + } 402 + } 403 + 404 + static int cedrus_buf_init(struct vb2_buffer *vb) 405 + { 406 + struct vb2_queue *vq = vb->vb2_queue; 
407 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); 408 + 409 + if (!V4L2_TYPE_IS_OUTPUT(vq->type)) 410 + ctx->dst_bufs[vb->index] = vb; 411 + 412 + return 0; 413 + } 414 + 415 + static void cedrus_buf_cleanup(struct vb2_buffer *vb) 416 + { 417 + struct vb2_queue *vq = vb->vb2_queue; 418 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); 419 + 420 + if (!V4L2_TYPE_IS_OUTPUT(vq->type)) 421 + ctx->dst_bufs[vb->index] = NULL; 422 + } 423 + 424 + static int cedrus_buf_prepare(struct vb2_buffer *vb) 425 + { 426 + struct vb2_queue *vq = vb->vb2_queue; 427 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); 428 + struct v4l2_pix_format *pix_fmt; 429 + 430 + if (V4L2_TYPE_IS_OUTPUT(vq->type)) 431 + pix_fmt = &ctx->src_fmt; 432 + else 433 + pix_fmt = &ctx->dst_fmt; 434 + 435 + if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage) 436 + return -EINVAL; 437 + 438 + vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage); 439 + 440 + return 0; 441 + } 442 + 443 + static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count) 444 + { 445 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); 446 + struct cedrus_dev *dev = ctx->dev; 447 + int ret = 0; 448 + 449 + switch (ctx->src_fmt.pixelformat) { 450 + case V4L2_PIX_FMT_MPEG2_SLICE: 451 + ctx->current_codec = CEDRUS_CODEC_MPEG2; 452 + break; 453 + 454 + default: 455 + return -EINVAL; 456 + } 457 + 458 + if (V4L2_TYPE_IS_OUTPUT(vq->type) && 459 + dev->dec_ops[ctx->current_codec]->start) 460 + ret = dev->dec_ops[ctx->current_codec]->start(ctx); 461 + 462 + if (ret) 463 + cedrus_queue_cleanup(vq, VB2_BUF_STATE_QUEUED); 464 + 465 + return ret; 466 + } 467 + 468 + static void cedrus_stop_streaming(struct vb2_queue *vq) 469 + { 470 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vq); 471 + struct cedrus_dev *dev = ctx->dev; 472 + 473 + if (V4L2_TYPE_IS_OUTPUT(vq->type) && 474 + dev->dec_ops[ctx->current_codec]->stop) 475 + dev->dec_ops[ctx->current_codec]->stop(ctx); 476 + 477 + cedrus_queue_cleanup(vq, VB2_BUF_STATE_ERROR); 478 + } 479 + 480 + 
static void cedrus_buf_queue(struct vb2_buffer *vb) 481 + { 482 + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); 483 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 484 + 485 + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); 486 + } 487 + 488 + static void cedrus_buf_request_complete(struct vb2_buffer *vb) 489 + { 490 + struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); 491 + 492 + v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl); 493 + } 494 + 495 + static struct vb2_ops cedrus_qops = { 496 + .queue_setup = cedrus_queue_setup, 497 + .buf_prepare = cedrus_buf_prepare, 498 + .buf_init = cedrus_buf_init, 499 + .buf_cleanup = cedrus_buf_cleanup, 500 + .buf_queue = cedrus_buf_queue, 501 + .buf_request_complete = cedrus_buf_request_complete, 502 + .start_streaming = cedrus_start_streaming, 503 + .stop_streaming = cedrus_stop_streaming, 504 + .wait_prepare = vb2_ops_wait_prepare, 505 + .wait_finish = vb2_ops_wait_finish, 506 + }; 507 + 508 + int cedrus_queue_init(void *priv, struct vb2_queue *src_vq, 509 + struct vb2_queue *dst_vq) 510 + { 511 + struct cedrus_ctx *ctx = priv; 512 + int ret; 513 + 514 + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; 515 + src_vq->io_modes = VB2_MMAP | VB2_DMABUF; 516 + src_vq->drv_priv = ctx; 517 + src_vq->buf_struct_size = sizeof(struct cedrus_buffer); 518 + src_vq->min_buffers_needed = 1; 519 + src_vq->ops = &cedrus_qops; 520 + src_vq->mem_ops = &vb2_dma_contig_memops; 521 + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; 522 + src_vq->lock = &ctx->dev->dev_mutex; 523 + src_vq->dev = ctx->dev->dev; 524 + src_vq->supports_requests = true; 525 + 526 + ret = vb2_queue_init(src_vq); 527 + if (ret) 528 + return ret; 529 + 530 + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 531 + dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; 532 + dst_vq->drv_priv = ctx; 533 + dst_vq->buf_struct_size = sizeof(struct cedrus_buffer); 534 + dst_vq->min_buffers_needed = 1; 535 + dst_vq->ops = &cedrus_qops; 536 + dst_vq->mem_ops = 
&vb2_dma_contig_memops; 537 + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; 538 + dst_vq->lock = &ctx->dev->dev_mutex; 539 + dst_vq->dev = ctx->dev->dev; 540 + 541 + return vb2_queue_init(dst_vq); 542 + }
+30
drivers/staging/media/sunxi/cedrus/cedrus_video.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Cedrus VPU driver 4 + * 5 + * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com> 6 + * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com> 7 + * Copyright (C) 2018 Bootlin 8 + * 9 + * Based on the vim2m driver, that is: 10 + * 11 + * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 12 + * Pawel Osciak, <pawel@osciak.com> 13 + * Marek Szyprowski, <m.szyprowski@samsung.com> 14 + */ 15 + 16 + #ifndef _CEDRUS_VIDEO_H_ 17 + #define _CEDRUS_VIDEO_H_ 18 + 19 + struct cedrus_format { 20 + u32 pixelformat; 21 + u32 directions; 22 + unsigned int capabilities; 23 + }; 24 + 25 + extern const struct v4l2_ioctl_ops cedrus_ioctl_ops; 26 + 27 + int cedrus_queue_init(void *priv, struct vb2_queue *src_vq, 28 + struct vb2_queue *dst_vq); 29 + 30 + #endif
+1 -1
drivers/usb/gadget/function/uvc_queue.c
··· 166 166 unsigned long flags; 167 167 int ret; 168 168 169 - ret = vb2_qbuf(&queue->queue, buf); 169 + ret = vb2_qbuf(&queue->queue, NULL, buf); 170 170 if (ret < 0) 171 171 return ret; 172 172
+29
include/media/media-device.h
··· 27 27 28 28 struct ida; 29 29 struct device; 30 + struct media_device; 30 31 31 32 /** 32 33 * struct media_entity_notify - Media Entity Notify ··· 51 50 * struct media_device_ops - Media device operations 52 51 * @link_notify: Link state change notification callback. This callback is 53 52 * called with the graph_mutex held. 53 + * @req_alloc: Allocate a request. Set this if you need to allocate a struct 54 + * larger then struct media_request. @req_alloc and @req_free must 55 + * either both be set or both be NULL. 56 + * @req_free: Free a request. Set this if @req_alloc was set as well, leave 57 + * to NULL otherwise. 58 + * @req_validate: Validate a request, but do not queue yet. The req_queue_mutex 59 + * lock is held when this op is called. 60 + * @req_queue: Queue a validated request, cannot fail. If something goes 61 + * wrong when queueing this request then it should be marked 62 + * as such internally in the driver and any related buffers 63 + * must eventually return to vb2 with state VB2_BUF_STATE_ERROR. 64 + * The req_queue_mutex lock is held when this op is called. 65 + * It is important that vb2 buffer objects are queued last after 66 + * all other object types are queued: queueing a buffer kickstarts 67 + * the request processing, so all other objects related to the 68 + * request (and thus the buffer) must be available to the driver. 69 + * And once a buffer is queued, then the driver can complete 70 + * or delete objects from the request before req_queue exits. 
54 71 */ 55 72 struct media_device_ops { 56 73 int (*link_notify)(struct media_link *link, u32 flags, 57 74 unsigned int notification); 75 + struct media_request *(*req_alloc)(struct media_device *mdev); 76 + void (*req_free)(struct media_request *req); 77 + int (*req_validate)(struct media_request *req); 78 + void (*req_queue)(struct media_request *req); 58 79 }; 59 80 60 81 /** ··· 111 88 * @disable_source: Disable Source Handler function pointer 112 89 * 113 90 * @ops: Operation handler callbacks 91 + * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t. 92 + * other operations that stop or start streaming. 93 + * @request_id: Used to generate unique request IDs 114 94 * 115 95 * This structure represents an abstract high-level media device. It allows easy 116 96 * access to entities and provides basic media device-level support. The ··· 184 158 void (*disable_source)(struct media_entity *entity); 185 159 186 160 const struct media_device_ops *ops; 161 + 162 + struct mutex req_queue_mutex; 163 + atomic_t request_id; 187 164 }; 188 165 189 166 /* We don't need to include pci.h or usb.h here */
+442
include/media/media-request.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Media device request objects 4 + * 5 + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. 6 + * Copyright (C) 2018 Intel Corporation 7 + * 8 + * Author: Hans Verkuil <hans.verkuil@cisco.com> 9 + * Author: Sakari Ailus <sakari.ailus@linux.intel.com> 10 + */ 11 + 12 + #ifndef MEDIA_REQUEST_H 13 + #define MEDIA_REQUEST_H 14 + 15 + #include <linux/list.h> 16 + #include <linux/slab.h> 17 + #include <linux/spinlock.h> 18 + #include <linux/refcount.h> 19 + 20 + #include <media/media-device.h> 21 + 22 + /** 23 + * enum media_request_state - media request state 24 + * 25 + * @MEDIA_REQUEST_STATE_IDLE: Idle 26 + * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes 27 + * allowed 28 + * @MEDIA_REQUEST_STATE_QUEUED: Queued 29 + * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done 30 + * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited 31 + * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e. 
32 + * request objects are being added, 33 + * modified or removed 34 + * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used 35 + * internally for sanity check purposes 36 + */ 37 + enum media_request_state { 38 + MEDIA_REQUEST_STATE_IDLE, 39 + MEDIA_REQUEST_STATE_VALIDATING, 40 + MEDIA_REQUEST_STATE_QUEUED, 41 + MEDIA_REQUEST_STATE_COMPLETE, 42 + MEDIA_REQUEST_STATE_CLEANING, 43 + MEDIA_REQUEST_STATE_UPDATING, 44 + NR_OF_MEDIA_REQUEST_STATE, 45 + }; 46 + 47 + struct media_request_object; 48 + 49 + /** 50 + * struct media_request - Media device request 51 + * @mdev: Media device this request belongs to 52 + * @kref: Reference count 53 + * @debug_str: Prefix for debug messages (process name:fd) 54 + * @state: The state of the request 55 + * @updating_count: count the number of request updates that are in progress 56 + * @access_count: count the number of request accesses that are in progress 57 + * @objects: List of @struct media_request_object request objects 58 + * @num_incomplete_objects: The number of incomplete objects in the request 59 + * @poll_wait: Wait queue for poll 60 + * @lock: Serializes access to this struct 61 + */ 62 + struct media_request { 63 + struct media_device *mdev; 64 + struct kref kref; 65 + char debug_str[TASK_COMM_LEN + 11]; 66 + enum media_request_state state; 67 + unsigned int updating_count; 68 + unsigned int access_count; 69 + struct list_head objects; 70 + unsigned int num_incomplete_objects; 71 + struct wait_queue_head poll_wait; 72 + spinlock_t lock; 73 + }; 74 + 75 + #ifdef CONFIG_MEDIA_CONTROLLER 76 + 77 + /** 78 + * media_request_lock_for_access - Lock the request to access its objects 79 + * 80 + * @req: The media request 81 + * 82 + * Use before accessing a completed request. A reference to the request must 83 + * be held during the access. This usually takes place automatically through 84 + * a file handle. Use @media_request_unlock_for_access when done. 
85 + */ 86 + static inline int __must_check 87 + media_request_lock_for_access(struct media_request *req) 88 + { 89 + unsigned long flags; 90 + int ret = -EBUSY; 91 + 92 + spin_lock_irqsave(&req->lock, flags); 93 + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { 94 + req->access_count++; 95 + ret = 0; 96 + } 97 + spin_unlock_irqrestore(&req->lock, flags); 98 + 99 + return ret; 100 + } 101 + 102 + /** 103 + * media_request_unlock_for_access - Unlock a request previously locked for 104 + * access 105 + * 106 + * @req: The media request 107 + * 108 + * Unlock a request that has previously been locked using 109 + * @media_request_lock_for_access. 110 + */ 111 + static inline void media_request_unlock_for_access(struct media_request *req) 112 + { 113 + unsigned long flags; 114 + 115 + spin_lock_irqsave(&req->lock, flags); 116 + if (!WARN_ON(!req->access_count)) 117 + req->access_count--; 118 + spin_unlock_irqrestore(&req->lock, flags); 119 + } 120 + 121 + /** 122 + * media_request_lock_for_update - Lock the request for updating its objects 123 + * 124 + * @req: The media request 125 + * 126 + * Use before updating a request, i.e. adding, modifying or removing a request 127 + * object in it. A reference to the request must be held during the update. This 128 + * usually takes place automatically through a file handle. Use 129 + * @media_request_unlock_for_update when done. 
130 + */ 131 + static inline int __must_check 132 + media_request_lock_for_update(struct media_request *req) 133 + { 134 + unsigned long flags; 135 + int ret = 0; 136 + 137 + spin_lock_irqsave(&req->lock, flags); 138 + if (req->state == MEDIA_REQUEST_STATE_IDLE || 139 + req->state == MEDIA_REQUEST_STATE_UPDATING) { 140 + req->state = MEDIA_REQUEST_STATE_UPDATING; 141 + req->updating_count++; 142 + } else { 143 + ret = -EBUSY; 144 + } 145 + spin_unlock_irqrestore(&req->lock, flags); 146 + 147 + return ret; 148 + } 149 + 150 + /** 151 + * media_request_unlock_for_update - Unlock a request previously locked for 152 + * update 153 + * 154 + * @req: The media request 155 + * 156 + * Unlock a request that has previously been locked using 157 + * @media_request_lock_for_update. 158 + */ 159 + static inline void media_request_unlock_for_update(struct media_request *req) 160 + { 161 + unsigned long flags; 162 + 163 + spin_lock_irqsave(&req->lock, flags); 164 + WARN_ON(req->updating_count <= 0); 165 + if (!--req->updating_count) 166 + req->state = MEDIA_REQUEST_STATE_IDLE; 167 + spin_unlock_irqrestore(&req->lock, flags); 168 + } 169 + 170 + /** 171 + * media_request_get - Get the media request 172 + * 173 + * @req: The media request 174 + * 175 + * Get the media request. 176 + */ 177 + static inline void media_request_get(struct media_request *req) 178 + { 179 + kref_get(&req->kref); 180 + } 181 + 182 + /** 183 + * media_request_put - Put the media request 184 + * 185 + * @req: The media request 186 + * 187 + * Put the media request. The media request will be released 188 + * when the refcount reaches 0. 189 + */ 190 + void media_request_put(struct media_request *req); 191 + 192 + /** 193 + * media_request_get_by_fd - Get a media request by fd 194 + * 195 + * @mdev: Media device this request belongs to 196 + * @request_fd: The file descriptor of the request 197 + * 198 + * Get the request represented by @request_fd that is owned 199 + * by the media device. 
200 + * 201 + * Return a -EACCES error pointer if requests are not supported 202 + * by this driver. Return -EINVAL if the request was not found. 203 + * Return the pointer to the request if found: the caller will 204 + * have to call @media_request_put when it finished using the 205 + * request. 206 + */ 207 + struct media_request * 208 + media_request_get_by_fd(struct media_device *mdev, int request_fd); 209 + 210 + /** 211 + * media_request_alloc - Allocate the media request 212 + * 213 + * @mdev: Media device this request belongs to 214 + * @alloc_fd: Store the request's file descriptor in this int 215 + * 216 + * Allocated the media request and put the fd in @alloc_fd. 217 + */ 218 + int media_request_alloc(struct media_device *mdev, 219 + int *alloc_fd); 220 + 221 + #else 222 + 223 + static inline void media_request_get(struct media_request *req) 224 + { 225 + } 226 + 227 + static inline void media_request_put(struct media_request *req) 228 + { 229 + } 230 + 231 + static inline struct media_request * 232 + media_request_get_by_fd(struct media_device *mdev, int request_fd) 233 + { 234 + return ERR_PTR(-EACCES); 235 + } 236 + 237 + #endif 238 + 239 + /** 240 + * struct media_request_object_ops - Media request object operations 241 + * @prepare: Validate and prepare the request object, optional. 242 + * @unprepare: Unprepare the request object, optional. 243 + * @queue: Queue the request object, optional. 244 + * @unbind: Unbind the request object, optional. 245 + * @release: Release the request object, required. 
246 + */ 247 + struct media_request_object_ops { 248 + int (*prepare)(struct media_request_object *object); 249 + void (*unprepare)(struct media_request_object *object); 250 + void (*queue)(struct media_request_object *object); 251 + void (*unbind)(struct media_request_object *object); 252 + void (*release)(struct media_request_object *object); 253 + }; 254 + 255 + /** 256 + * struct media_request_object - An opaque object that belongs to a media 257 + * request 258 + * 259 + * @ops: object's operations 260 + * @priv: object's priv pointer 261 + * @req: the request this object belongs to (can be NULL) 262 + * @list: List entry of the object for @struct media_request 263 + * @kref: Reference count of the object, acquire before releasing req->lock 264 + * @completed: If true, then this object was completed. 265 + * 266 + * An object related to the request. This struct is always embedded in 267 + * another struct that contains the actual data for this request object. 268 + */ 269 + struct media_request_object { 270 + const struct media_request_object_ops *ops; 271 + void *priv; 272 + struct media_request *req; 273 + struct list_head list; 274 + struct kref kref; 275 + bool completed; 276 + }; 277 + 278 + #ifdef CONFIG_MEDIA_CONTROLLER 279 + 280 + /** 281 + * media_request_object_get - Get a media request object 282 + * 283 + * @obj: The object 284 + * 285 + * Get a media request object. 286 + */ 287 + static inline void media_request_object_get(struct media_request_object *obj) 288 + { 289 + kref_get(&obj->kref); 290 + } 291 + 292 + /** 293 + * media_request_object_put - Put a media request object 294 + * 295 + * @obj: The object 296 + * 297 + * Put a media request object. Once all references are gone, the 298 + * object's memory is released. 
299 + */ 300 + void media_request_object_put(struct media_request_object *obj); 301 + 302 + /** 303 + * media_request_object_find - Find an object in a request 304 + * 305 + * @req: The media request 306 + * @ops: Find an object with this ops value 307 + * @priv: Find an object with this priv value 308 + * 309 + * Both @ops and @priv must be non-NULL. 310 + * 311 + * Returns the object pointer or NULL if not found. The caller must 312 + * call media_request_object_put() once it finished using the object. 313 + * 314 + * Since this function needs to walk the list of objects it takes 315 + * the @req->lock spin lock to make this safe. 316 + */ 317 + struct media_request_object * 318 + media_request_object_find(struct media_request *req, 319 + const struct media_request_object_ops *ops, 320 + void *priv); 321 + 322 + /** 323 + * media_request_object_init - Initialise a media request object 324 + * 325 + * @obj: The object 326 + * 327 + * Initialise a media request object. The object will be released using the 328 + * release callback of the ops once it has no references (this function 329 + * initialises references to one). 330 + */ 331 + void media_request_object_init(struct media_request_object *obj); 332 + 333 + /** 334 + * media_request_object_bind - Bind a media request object to a request 335 + * 336 + * @req: The media request 337 + * @ops: The object ops for this object 338 + * @priv: A driver-specific priv pointer associated with this object 339 + * @is_buffer: Set to true if the object a buffer object. 340 + * @obj: The object 341 + * 342 + * Bind this object to the request and set the ops and priv values of 343 + * the object so it can be found later with media_request_object_find(). 344 + * 345 + * Every bound object must be unbound or completed by the kernel at some 346 + * point in time, otherwise the request will never complete. When the 347 + * request is released all completed objects will be unbound by the 348 + * request core code. 
349 + * 350 + * Buffer objects will be added to the end of the request's object 351 + * list, non-buffer objects will be added to the front of the list. 352 + * This ensures that all buffer objects are at the end of the list 353 + * and that all non-buffer objects that they depend on are processed 354 + * first. 355 + */ 356 + int media_request_object_bind(struct media_request *req, 357 + const struct media_request_object_ops *ops, 358 + void *priv, bool is_buffer, 359 + struct media_request_object *obj); 360 + 361 + /** 362 + * media_request_object_unbind - Unbind a media request object 363 + * 364 + * @obj: The object 365 + * 366 + * Unbind the media request object from the request. 367 + */ 368 + void media_request_object_unbind(struct media_request_object *obj); 369 + 370 + /** 371 + * media_request_object_complete - Mark the media request object as complete 372 + * 373 + * @obj: The object 374 + * 375 + * Mark the media request object as complete. Only bound objects can 376 + * be completed. 
377 + */ 378 + void media_request_object_complete(struct media_request_object *obj); 379 + 380 + #else 381 + 382 + static inline int __must_check 383 + media_request_lock_for_access(struct media_request *req) 384 + { 385 + return -EINVAL; 386 + } 387 + 388 + static inline void media_request_unlock_for_access(struct media_request *req) 389 + { 390 + } 391 + 392 + static inline int __must_check 393 + media_request_lock_for_update(struct media_request *req) 394 + { 395 + return -EINVAL; 396 + } 397 + 398 + static inline void media_request_unlock_for_update(struct media_request *req) 399 + { 400 + } 401 + 402 + static inline void media_request_object_get(struct media_request_object *obj) 403 + { 404 + } 405 + 406 + static inline void media_request_object_put(struct media_request_object *obj) 407 + { 408 + } 409 + 410 + static inline struct media_request_object * 411 + media_request_object_find(struct media_request *req, 412 + const struct media_request_object_ops *ops, 413 + void *priv) 414 + { 415 + return NULL; 416 + } 417 + 418 + static inline void media_request_object_init(struct media_request_object *obj) 419 + { 420 + obj->ops = NULL; 421 + obj->req = NULL; 422 + } 423 + 424 + static inline int media_request_object_bind(struct media_request *req, 425 + const struct media_request_object_ops *ops, 426 + void *priv, bool is_buffer, 427 + struct media_request_object *obj) 428 + { 429 + return 0; 430 + } 431 + 432 + static inline void media_request_object_unbind(struct media_request_object *obj) 433 + { 434 + } 435 + 436 + static inline void media_request_object_complete(struct media_request_object *obj) 437 + { 438 + } 439 + 440 + #endif 441 + 442 + #endif
+132 -9
include/media/v4l2-ctrls.h
··· 20 20 #include <linux/list.h> 21 21 #include <linux/mutex.h> 22 22 #include <linux/videodev2.h> 23 + #include <media/media-request.h> 23 24 24 25 /* forward references */ 25 26 struct file; ··· 35 34 36 35 /** 37 36 * union v4l2_ctrl_ptr - A pointer to a control value. 38 - * @p_s32: Pointer to a 32-bit signed value. 39 - * @p_s64: Pointer to a 64-bit signed value. 40 - * @p_u8: Pointer to a 8-bit unsigned value. 41 - * @p_u16: Pointer to a 16-bit unsigned value. 42 - * @p_u32: Pointer to a 32-bit unsigned value. 43 - * @p_char: Pointer to a string. 44 - * @p: Pointer to a compound value. 37 + * @p_s32: Pointer to a 32-bit signed value. 38 + * @p_s64: Pointer to a 64-bit signed value. 39 + * @p_u8: Pointer to a 8-bit unsigned value. 40 + * @p_u16: Pointer to a 16-bit unsigned value. 41 + * @p_u32: Pointer to a 32-bit unsigned value. 42 + * @p_char: Pointer to a string. 43 + * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure. 44 + * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure. 45 + * @p: Pointer to a compound value. 45 46 */ 46 47 union v4l2_ctrl_ptr { 47 48 s32 *p_s32; ··· 52 49 u16 *p_u16; 53 50 u32 *p_u32; 54 51 char *p_char; 52 + struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params; 53 + struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization; 55 54 void *p; 56 55 }; 57 56 ··· 252 247 * @ctrl: The actual control information. 253 248 * @helper: Pointer to helper struct. Used internally in 254 249 * ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``. 250 + * @from_other_dev: If true, then @ctrl was defined in another 251 + * device than the &struct v4l2_ctrl_handler. 252 + * @req_done: Internal flag: if the control handler containing this control 253 + * reference is bound to a media request, then this is set when 254 + * the control has been applied. This prevents applying controls 255 + * from a cluster with multiple controls twice (when the first 256 + * control of a cluster is applied, they all are). 
257 + * @req: If set, this refers to another request that sets this control. 258 + * @p_req: If the control handler containing this control reference 259 + * is bound to a media request, then this points to the 260 + * value of the control that should be applied when the request 261 + * is executed, or to the value of the control at the time 262 + * that the request was completed. 255 263 * 256 264 * Each control handler has a list of these refs. The list_head is used to 257 265 * keep a sorted-by-control-ID list of all controls, while the next pointer ··· 275 257 struct v4l2_ctrl_ref *next; 276 258 struct v4l2_ctrl *ctrl; 277 259 struct v4l2_ctrl_helper *helper; 260 + bool from_other_dev; 261 + bool req_done; 262 + struct v4l2_ctrl_ref *req; 263 + union v4l2_ctrl_ptr p_req; 278 264 }; 279 265 280 266 /** ··· 302 280 * @notify_priv: Passed as argument to the v4l2_ctrl notify callback. 303 281 * @nr_of_buckets: Total number of buckets in the array. 304 282 * @error: The error code of the first failed control addition. 283 + * @request_is_queued: True if the request was queued. 284 + * @requests: List to keep track of open control handler request objects. 285 + * For the parent control handler (@req_obj.req == NULL) this 286 + * is the list header. When the parent control handler is 287 + * removed, it has to unbind and put all these requests since 288 + * they refer to the parent. 289 + * @requests_queued: List of the queued requests. This determines the order 290 + * in which these controls are applied. Once the request is 291 + * completed it is removed from this list. 292 + * @req_obj: The &struct media_request_object, used to link into a 293 + * &struct media_request. This request object has a refcount. 
305 294 */ 306 295 struct v4l2_ctrl_handler { 307 296 struct mutex _lock; ··· 325 292 void *notify_priv; 326 293 u16 nr_of_buckets; 327 294 int error; 295 + bool request_is_queued; 296 + struct list_head requests; 297 + struct list_head requests_queued; 298 + struct media_request_object req_obj; 328 299 }; 329 300 330 301 /** ··· 670 633 * @add: The control handler whose controls you want to add to 671 634 * the @hdl control handler. 672 635 * @filter: This function will filter which controls should be added. 636 + * @from_other_dev: If true, then the controls in @add were defined in another 637 + * device than @hdl. 673 638 * 674 639 * Does nothing if either of the two handlers is a NULL pointer. 675 640 * If @filter is NULL, then all controls are added. Otherwise only those ··· 681 642 */ 682 643 int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, 683 644 struct v4l2_ctrl_handler *add, 684 - v4l2_ctrl_filter filter); 645 + v4l2_ctrl_filter filter, 646 + bool from_other_dev); 685 647 686 648 /** 687 649 * v4l2_ctrl_radio_filter() - Standard filter for radio controls. ··· 1110 1070 */ 1111 1071 __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); 1112 1072 1073 + /** 1074 + * v4l2_ctrl_request_setup - helper function to apply control values in a request 1075 + * 1076 + * @req: The request 1077 + * @parent: The parent control handler ('priv' in media_request_object_find()) 1078 + * 1079 + * This is a helper function to call the control handler's s_ctrl callback with 1080 + * the control values contained in the request. Do note that this approach of 1081 + * applying control values in a request is only applicable to memory-to-memory 1082 + * devices. 
1083 + */ 1084 + void v4l2_ctrl_request_setup(struct media_request *req, 1085 + struct v4l2_ctrl_handler *parent); 1086 + 1087 + /** 1088 + * v4l2_ctrl_request_complete - Complete a control handler request object 1089 + * 1090 + * @req: The request 1091 + * @parent: The parent control handler ('priv' in media_request_object_find()) 1092 + * 1093 + * This function is to be called on each control handler that may have had a 1094 + * request object associated with it, i.e. control handlers of a driver that 1095 + * supports requests. 1096 + * 1097 + * The function first obtains the values of any volatile controls in the control 1098 + * handler and attach them to the request. Then, the function completes the 1099 + * request object. 1100 + */ 1101 + void v4l2_ctrl_request_complete(struct media_request *req, 1102 + struct v4l2_ctrl_handler *parent); 1103 + 1104 + /** 1105 + * v4l2_ctrl_request_hdl_find - Find the control handler in the request 1106 + * 1107 + * @req: The request 1108 + * @parent: The parent control handler ('priv' in media_request_object_find()) 1109 + * 1110 + * This function finds the control handler in the request. It may return 1111 + * NULL if not found. When done, you must call v4l2_ctrl_request_put_hdl() 1112 + * with the returned handler pointer. 1113 + * 1114 + * If the request is not in state VALIDATING or QUEUED, then this function 1115 + * will always return NULL. 1116 + * 1117 + * Note that in state VALIDATING the req_queue_mutex is held, so 1118 + * no objects can be added or deleted from the request. 1119 + * 1120 + * In state QUEUED it is the driver that will have to ensure this. 
1121 + */ 1122 + struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, 1123 + struct v4l2_ctrl_handler *parent); 1124 + 1125 + /** 1126 + * v4l2_ctrl_request_hdl_put - Put the control handler 1127 + * 1128 + * @hdl: Put this control handler 1129 + * 1130 + * This function released the control handler previously obtained from' 1131 + * v4l2_ctrl_request_hdl_find(). 1132 + */ 1133 + static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl) 1134 + { 1135 + if (hdl) 1136 + media_request_object_put(&hdl->req_obj); 1137 + } 1138 + 1139 + /** 1140 + * v4l2_ctrl_request_ctrl_find() - Find a control with the given ID. 1141 + * 1142 + * @hdl: The control handler from the request. 1143 + * @id: The ID of the control to find. 1144 + * 1145 + * This function returns a pointer to the control if this control is 1146 + * part of the request or NULL otherwise. 1147 + */ 1148 + struct v4l2_ctrl * 1149 + v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); 1150 + 1113 1151 /* Helpers for ioctl_ops */ 1114 1152 1115 1153 /** ··· 1254 1136 * :ref:`VIDIOC_G_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl 1255 1137 * 1256 1138 * @hdl: pointer to &struct v4l2_ctrl_handler 1139 + * @mdev: pointer to &struct media_device 1257 1140 * @c: pointer to &struct v4l2_ext_controls 1258 1141 * 1259 1142 * If hdl == NULL then they will all return -EINVAL. 1260 1143 */ 1261 - int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, 1144 + int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev, 1262 1145 struct v4l2_ext_controls *c); 1263 1146 1264 1147 /** ··· 1267 1148 * :ref:`VIDIOC_TRY_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl 1268 1149 * 1269 1150 * @hdl: pointer to &struct v4l2_ctrl_handler 1151 + * @mdev: pointer to &struct media_device 1270 1152 * @c: pointer to &struct v4l2_ext_controls 1271 1153 * 1272 1154 * If hdl == NULL then they will all return -EINVAL. 
1273 1155 */ 1274 1156 int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, 1157 + struct media_device *mdev, 1275 1158 struct v4l2_ext_controls *c); 1276 1159 1277 1160 /** ··· 1282 1161 * 1283 1162 * @fh: pointer to &struct v4l2_fh 1284 1163 * @hdl: pointer to &struct v4l2_ctrl_handler 1164 + * @mdev: pointer to &struct media_device 1285 1165 * @c: pointer to &struct v4l2_ext_controls 1286 1166 * 1287 1167 * If hdl == NULL then they will all return -EINVAL. 1288 1168 */ 1289 1169 int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, 1170 + struct media_device *mdev, 1290 1171 struct v4l2_ext_controls *c); 1291 1172 1292 1173 /**
+11
include/media/v4l2-device.h
··· 211 211 sd->v4l2_dev->notify(sd, notification, arg); 212 212 } 213 213 214 + /** 215 + * v4l2_device_supports_requests - Test if requests are supported. 216 + * 217 + * @v4l2_dev: pointer to struct v4l2_device 218 + */ 219 + static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) 220 + { 221 + return v4l2_dev->mdev && v4l2_dev->mdev->ops && 222 + v4l2_dev->mdev->ops->req_queue; 223 + } 224 + 214 225 /* Helper macros to iterate over all subdevs. */ 215 226 216 227 /**
+4
include/media/v4l2-mem2mem.h
··· 622 622 return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx); 623 623 } 624 624 625 + /* v4l2 request helper */ 626 + 627 + void vb2_m2m_request_queue(struct media_request *req); 628 + 625 629 /* v4l2 ioctl helpers */ 626 630 627 631 int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+59 -5
include/media/videobuf2-core.h
··· 17 17 #include <linux/poll.h> 18 18 #include <linux/dma-buf.h> 19 19 #include <linux/bitops.h> 20 + #include <media/media-request.h> 20 21 21 22 #define VB2_MAX_FRAME (32) 22 23 #define VB2_MAX_PLANES (8) ··· 204 203 /** 205 204 * enum vb2_buffer_state - current video buffer state. 206 205 * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control. 206 + * @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request. 207 207 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf. 208 - * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver. 209 208 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver. 210 209 * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver. 211 210 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used ··· 218 217 */ 219 218 enum vb2_buffer_state { 220 219 VB2_BUF_STATE_DEQUEUED, 220 + VB2_BUF_STATE_IN_REQUEST, 221 221 VB2_BUF_STATE_PREPARING, 222 - VB2_BUF_STATE_PREPARED, 223 222 VB2_BUF_STATE_QUEUED, 224 223 VB2_BUF_STATE_REQUEUEING, 225 224 VB2_BUF_STATE_ACTIVE, ··· 239 238 * @num_planes: number of planes in the buffer 240 239 * on an internal driver queue. 241 240 * @timestamp: frame timestamp in ns. 241 + * @req_obj: used to bind this buffer to a request. This 242 + * request object has a refcount. 242 243 */ 243 244 struct vb2_buffer { 244 245 struct vb2_queue *vb2_queue; ··· 249 246 unsigned int memory; 250 247 unsigned int num_planes; 251 248 u64 timestamp; 249 + struct media_request_object req_obj; 252 250 253 251 /* private: internal use only 254 252 * 255 253 * state: current buffer state; do not change 254 + * synced: this buffer has been synced for DMA, i.e. the 255 + * 'prepare' memop was called. It is cleared again 256 + * after the 'finish' memop is called. 257 + * prepared: this buffer has been prepared, i.e. the 258 + * buf_prepare op was called. It is cleared again 259 + * after the 'buf_finish' op is called. 
256 260 * queued_entry: entry on the queued buffers list, which holds 257 261 * all buffers queued from userspace 258 262 * done_entry: entry on the list that stores all buffers ready ··· 267 257 * vb2_plane: per-plane information; do not change 268 258 */ 269 259 enum vb2_buffer_state state; 260 + bool synced; 261 + bool prepared; 270 262 271 263 struct vb2_plane planes[VB2_MAX_PLANES]; 272 264 struct list_head queued_entry; ··· 299 287 u32 cnt_buf_finish; 300 288 u32 cnt_buf_cleanup; 301 289 u32 cnt_buf_queue; 290 + u32 cnt_buf_request_complete; 302 291 303 292 /* This counts the number of calls to vb2_buffer_done() */ 304 293 u32 cnt_buf_done; ··· 393 380 * ioctl; might be called before @start_streaming callback 394 381 * if user pre-queued buffers before calling 395 382 * VIDIOC_STREAMON(). 383 + * @buf_request_complete: a buffer that was never queued to the driver but is 384 + * associated with a queued request was canceled. 385 + * The driver will have to mark associated objects in the 386 + * request as completed; required if requests are 387 + * supported. 396 388 */ 397 389 struct vb2_ops { 398 390 int (*queue_setup)(struct vb2_queue *q, ··· 416 398 void (*stop_streaming)(struct vb2_queue *q); 417 399 418 400 void (*buf_queue)(struct vb2_buffer *vb); 401 + 402 + void (*buf_request_complete)(struct vb2_buffer *vb); 419 403 }; 420 404 421 405 /** ··· 426 406 * @verify_planes_array: Verify that a given user space structure contains 427 407 * enough planes for the buffer. This is called 428 408 * for each dequeued buffer. 409 + * @init_buffer: given a &vb2_buffer initialize the extra data after 410 + * struct vb2_buffer. 411 + * For V4L2 this is a &struct vb2_v4l2_buffer. 429 412 * @fill_user_buffer: given a &vb2_buffer fill in the userspace structure. 430 413 * For V4L2 this is a &struct v4l2_buffer. 431 414 * @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer. 
··· 439 416 */ 440 417 struct vb2_buf_ops { 441 418 int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); 419 + void (*init_buffer)(struct vb2_buffer *vb); 442 420 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); 443 - int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, 444 - struct vb2_plane *planes); 421 + int (*fill_vb2_buffer)(struct vb2_buffer *vb, struct vb2_plane *planes); 445 422 void (*copy_timestamp)(struct vb2_buffer *vb, const void *pb); 446 423 }; 447 424 ··· 472 449 * @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF 473 450 * has not been called. This is a vb1 idiom that has been adopted 474 451 * also by vb2. 452 + * @supports_requests: this queue supports the Request API. 453 + * @uses_qbuf: qbuf was used directly for this queue. Set to 1 the first 454 + * time this is called. Set to 0 when the queue is canceled. 455 + * If this is 1, then you cannot queue buffers from a request. 456 + * @uses_requests: requests are used for this queue. Set to 1 the first time 457 + * a request is queued. Set to 0 when the queue is canceled. 458 + * If this is 1, then you cannot queue buffers directly. 475 459 * @lock: pointer to a mutex that protects the &struct vb2_queue. The 476 460 * driver can set this to a mutex to let the v4l2 core serialize 477 461 * the queuing ioctls. If the driver wants to handle locking ··· 546 516 unsigned fileio_write_immediately:1; 547 517 unsigned allow_zero_bytesused:1; 548 518 unsigned quirk_poll_must_check_waiting_for_buffers:1; 519 + unsigned supports_requests:1; 520 + unsigned uses_qbuf:1; 521 + unsigned uses_requests:1; 549 522 550 523 struct mutex *lock; 551 524 void *owner; ··· 785 752 * @index: id number of the buffer 786 753 * @pb: buffer structure passed from userspace to 787 754 * v4l2_ioctl_ops->vidioc_qbuf handler in driver 755 + * @req: pointer to &struct media_request, may be NULL. 
788 756 * 789 757 * Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called 790 758 * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. 791 759 * 792 760 * This function: 793 761 * 762 + * #) If @req is non-NULL, then the buffer will be bound to this 763 + * media request and it returns. The buffer will be prepared and 764 + * queued to the driver (i.e. the next two steps) when the request 765 + * itself is queued. 794 766 * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver 795 767 * (if provided), in which driver-specific buffer initialization can 796 768 * be performed; ··· 804 766 * 805 767 * Return: returns zero on success; an error code otherwise. 806 768 */ 807 - int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb); 769 + int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, 770 + struct media_request *req); 808 771 809 772 /** 810 773 * vb2_core_dqbuf() - Dequeue a buffer to the userspace ··· 1182 1143 */ 1183 1144 int vb2_verify_memory_type(struct vb2_queue *q, 1184 1145 enum vb2_memory memory, unsigned int type); 1146 + 1147 + /** 1148 + * vb2_request_object_is_buffer() - return true if the object is a buffer 1149 + * 1150 + * @obj: the request object. 1151 + */ 1152 + bool vb2_request_object_is_buffer(struct media_request_object *obj); 1153 + 1154 + /** 1155 + * vb2_request_buffer_cnt() - return the number of buffers in the request 1156 + * 1157 + * @req: the request. 1158 + */ 1159 + unsigned int vb2_request_buffer_cnt(struct media_request *req); 1160 + 1185 1161 #endif /* _MEDIA_VIDEOBUF2_CORE_H */
+18 -2
include/media/videobuf2-v4l2.h
··· 32 32 * &enum v4l2_field. 33 33 * @timecode: frame timecode. 34 34 * @sequence: sequence count of this frame. 35 + * @request_fd: the request_fd associated with this buffer 36 + * @planes: plane information (userptr/fd, length, bytesused, data_offset). 35 37 * 36 38 * Should contain enough information to be able to cover all the fields 37 39 * of &struct v4l2_buffer at ``videodev2.h``. ··· 45 43 __u32 field; 46 44 struct v4l2_timecode timecode; 47 45 __u32 sequence; 46 + __s32 request_fd; 47 + struct vb2_plane planes[VB2_MAX_PLANES]; 48 48 }; 49 49 50 50 /* ··· 81 77 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel 82 78 * 83 79 * @q: pointer to &struct vb2_queue with videobuf2 queue. 80 + * @mdev: pointer to &struct media_device, may be NULL. 84 81 * @b: buffer structure passed from userspace to 85 82 * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver 86 83 * ··· 93 88 * #) verifies the passed buffer, 94 89 * #) calls &vb2_ops->buf_prepare callback in the driver (if provided), 95 90 * in which driver-specific buffer initialization can be performed. 91 + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, 92 + * then bind the prepared buffer to the request. 96 93 * 97 94 * The return values from this function are intended to be directly returned 98 95 * from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. 99 96 */ 100 - int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b); 97 + int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, 98 + struct v4l2_buffer *b); 101 99 102 100 /** 103 101 * vb2_qbuf() - Queue a buffer from userspace 104 102 * @q: pointer to &struct vb2_queue with videobuf2 queue. 103 + * @mdev: pointer to &struct media_device, may be NULL. 
105 104 * @b: buffer structure passed from userspace to 106 105 * &v4l2_ioctl_ops->vidioc_qbuf handler in driver 107 106 * ··· 114 105 * This function: 115 106 * 116 107 * #) verifies the passed buffer; 108 + * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set, 109 + * then bind the buffer to the request. 117 110 * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver 118 111 * (if provided), in which driver-specific buffer initialization can 119 112 * be performed; ··· 125 114 * The return values from this function are intended to be directly returned 126 115 * from &v4l2_ioctl_ops->vidioc_qbuf handler in driver. 127 116 */ 128 - int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b); 117 + int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, 118 + struct v4l2_buffer *b); 129 119 130 120 /** 131 121 * vb2_expbuf() - Export a buffer as a file descriptor ··· 302 290 * ..note:: only use if vq->lock is non-NULL. 303 291 */ 304 292 void vb2_ops_wait_finish(struct vb2_queue *vq); 293 + 294 + struct media_request; 295 + int vb2_request_validate(struct media_request *req); 296 + void vb2_request_queue(struct media_request *req); 305 297 306 298 #endif /* _MEDIA_VIDEOBUF2_V4L2_H */
+8
include/uapi/linux/media.h
··· 369 369 #define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum) 370 370 #define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc) 371 371 #define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology) 372 + #define MEDIA_IOC_REQUEST_ALLOC _IOR ('|', 0x05, int) 373 + 374 + /* 375 + * These ioctls are called on the request file descriptor as returned 376 + * by MEDIA_IOC_REQUEST_ALLOC. 377 + */ 378 + #define MEDIA_REQUEST_IOC_QUEUE _IO('|', 0x80) 379 + #define MEDIA_REQUEST_IOC_REINIT _IO('|', 0x81) 372 380 373 381 #ifndef __KERNEL__ 374 382
+65
include/uapi/linux/v4l2-controls.h
··· 402 402 #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) 403 403 #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) 404 404 405 + #define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) 406 + #define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) 407 + 405 408 #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) 406 409 #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) 407 410 #define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302) ··· 1094 1091 #define V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (V4L2_CID_DETECT_CLASS_BASE + 2) 1095 1092 #define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3) 1096 1093 #define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4) 1094 + 1095 + #define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 1096 + #define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 1097 + #define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 1098 + #define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 1099 + 1100 + struct v4l2_mpeg2_sequence { 1101 + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ 1102 + __u16 horizontal_size; 1103 + __u16 vertical_size; 1104 + __u32 vbv_buffer_size; 1105 + 1106 + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ 1107 + __u8 profile_and_level_indication; 1108 + __u8 progressive_sequence; 1109 + __u8 chroma_format; 1110 + }; 1111 + 1112 + struct v4l2_mpeg2_picture { 1113 + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ 1114 + __u8 picture_coding_type; 1115 + 1116 + /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Picture coding extension */ 1117 + __u8 f_code[2][2]; 1118 + __u8 intra_dc_precision; 1119 + __u8 picture_structure; 1120 + __u8 top_field_first; 1121 + __u8 frame_pred_frame_dct; 1122 + __u8 concealment_motion_vectors; 1123 + __u8 q_scale_type; 1124 + __u8 intra_vlc_format; 1125 + __u8 alternate_scan; 1126 + __u8 repeat_first_field; 1127 + __u8 progressive_frame; 1128 + }; 1129 + 1130 + struct v4l2_ctrl_mpeg2_slice_params { 1131 + __u32 bit_size; 1132 + __u32 data_bit_offset; 1133 + 1134 + struct v4l2_mpeg2_sequence sequence; 1135 + struct v4l2_mpeg2_picture picture; 1136 + 1137 + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ 1138 + __u8 quantiser_scale_code; 1139 + 1140 + __u8 backward_ref_index; 1141 + __u8 forward_ref_index; 1142 + }; 1143 + 1144 + struct v4l2_ctrl_mpeg2_quantization { 1145 + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */ 1146 + __u8 load_intra_quantiser_matrix; 1147 + __u8 load_non_intra_quantiser_matrix; 1148 + __u8 load_chroma_intra_quantiser_matrix; 1149 + __u8 load_chroma_non_intra_quantiser_matrix; 1150 + 1151 + __u8 intra_quantiser_matrix[64]; 1152 + __u8 non_intra_quantiser_matrix[64]; 1153 + __u8 chroma_intra_quantiser_matrix[64]; 1154 + __u8 chroma_non_intra_quantiser_matrix[64]; 1155 + }; 1097 1156 1098 1157 #endif
+29 -4
include/uapi/linux/videodev2.h
··· 646 646 #define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */ 647 647 #define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */ 648 648 #define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */ 649 + #define V4L2_PIX_FMT_MPEG2_SLICE v4l2_fourcc('M', 'G', '2', 'S') /* MPEG-2 parsed slice data */ 649 650 #define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */ 650 651 #define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */ 651 652 #define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */ ··· 688 687 #define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */ 689 688 #define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */ 690 689 #define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */ 690 + #define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */ 691 691 692 692 /* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ 693 693 #define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ ··· 870 868 __u32 count; 871 869 __u32 type; /* enum v4l2_buf_type */ 872 870 __u32 memory; /* enum v4l2_memory */ 873 - __u32 reserved[2]; 871 + __u32 capabilities; 872 + __u32 reserved[1]; 874 873 }; 874 + 875 + /* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */ 876 + #define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0) 877 + #define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1) 878 + #define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2) 879 + #define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3) 875 880 876 881 /** 877 882 * struct v4l2_plane - plane info for multi-planar buffers ··· 938 929 * @length: size in bytes of the buffer (NOT its payload) for single-plane 939 930 * buffers (when type != *_MPLANE); number of elements in the 
940 931 * planes array for multi-plane buffers 932 + * @request_fd: fd of the request that this buffer should use 941 933 * 942 934 * Contains data exchanged by application and driver using one of the Streaming 943 935 * I/O methods. ··· 963 953 } m; 964 954 __u32 length; 965 955 __u32 reserved2; 966 - __u32 reserved; 956 + union { 957 + __s32 request_fd; 958 + __u32 reserved; 959 + }; 967 960 }; 968 961 969 962 /* Flags for 'flags' field */ ··· 984 971 #define V4L2_BUF_FLAG_BFRAME 0x00000020 985 972 /* Buffer is ready, but the data contained within is corrupted. */ 986 973 #define V4L2_BUF_FLAG_ERROR 0x00000040 974 + /* Buffer is added to an unqueued request */ 975 + #define V4L2_BUF_FLAG_IN_REQUEST 0x00000080 987 976 /* timecode field is valid */ 988 977 #define V4L2_BUF_FLAG_TIMECODE 0x00000100 989 978 /* Buffer is prepared for queuing */ ··· 1004 989 #define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000 1005 990 /* mem2mem encoder/decoder */ 1006 991 #define V4L2_BUF_FLAG_LAST 0x00100000 992 + /* request_fd is valid */ 993 + #define V4L2_BUF_FLAG_REQUEST_FD 0x00800000 1007 994 1008 995 /** 1009 996 * struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor ··· 1622 1605 __u8 __user *p_u8; 1623 1606 __u16 __user *p_u16; 1624 1607 __u32 __user *p_u32; 1608 + struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params; 1609 + struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization; 1625 1610 void __user *ptr; 1626 1611 }; 1627 1612 } __attribute__ ((packed)); ··· 1637 1618 }; 1638 1619 __u32 count; 1639 1620 __u32 error_idx; 1640 - __u32 reserved[2]; 1621 + __s32 request_fd; 1622 + __u32 reserved[1]; 1641 1623 struct v4l2_ext_control *controls; 1642 1624 }; 1643 1625 ··· 1651 1631 #define V4L2_CTRL_MAX_DIMS (4) 1652 1632 #define V4L2_CTRL_WHICH_CUR_VAL 0 1653 1633 #define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000 1634 + #define V4L2_CTRL_WHICH_REQUEST_VAL 0x0f010000 1654 1635 1655 1636 enum v4l2_ctrl_type { 1656 1637 V4L2_CTRL_TYPE_INTEGER = 1, 
··· 1669 1648 V4L2_CTRL_TYPE_U8 = 0x0100, 1670 1649 V4L2_CTRL_TYPE_U16 = 0x0101, 1671 1650 V4L2_CTRL_TYPE_U32 = 0x0102, 1651 + V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103, 1652 + V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104, 1672 1653 }; 1673 1654 1674 1655 /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ ··· 2344 2321 * return: number of created buffers 2345 2322 * @memory: enum v4l2_memory; buffer memory type 2346 2323 * @format: frame format, for which buffers are requested 2324 + * @capabilities: capabilities of this buffer type. 2347 2325 * @reserved: future extensions 2348 2326 */ 2349 2327 struct v4l2_create_buffers { ··· 2352 2328 __u32 count; 2353 2329 __u32 memory; 2354 2330 struct v4l2_format format; 2355 - __u32 reserved[8]; 2331 + __u32 capabilities; 2332 + __u32 reserved[7]; 2356 2333 }; 2357 2334 2358 2335 /*