Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[media] omap3isp: Statistics

The OMAP3 ISP statistics entities compute histogram and H3A statistics
information from capture images.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Sakari Ailus <sakari.ailus@iki.fi>
Signed-off-by: David Cohen <dacohen@gmail.com>
Signed-off-by: Stanimir Varbanov <svarbanov@mm-sol.com>
Signed-off-by: Vimarsh Zutshi <vimarsh.zutshi@gmail.com>
Signed-off-by: Tuukka Toivonen <tuukkat76@gmail.com>
Signed-off-by: Sergio Aguirre <saaguirre@ti.com>
Signed-off-by: Antti Koskipaa <akoskipa@gmail.com>
Signed-off-by: Ivan T. Ivanov <iivanov@mm-sol.com>
Signed-off-by: RaniSuneela <r-m@ti.com>
Signed-off-by: Atanas Filipov <afilipov@mm-sol.com>
Signed-off-by: Gjorgji Rosikopulos <grosikopulos@mm-sol.com>
Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Signed-off-by: Nayden Kanchev <nkanchev@mm-sol.com>
Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: Dominic Curran <dcurran@ti.com>
Signed-off-by: Ilkka Myllyperkio <ilkka.myllyperkio@sofica.fi>
Signed-off-by: Pallavi Kulkarni <p-kulkarni@ti.com>
Signed-off-by: Vaibhav Hiremath <hvaibhav@ti.com>
Acked-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>

Authored by David Cohen and committed by Mauro Carvalho Chehab.
68e342b3 de1135d4

+2741
+117
drivers/media/video/omap3isp/isph3a.h
/*
 * isph3a.h
 *
 * TI OMAP3 ISP - H3A module (AEWB and AF statistics engines)
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#ifndef OMAP3_ISP_H3A_H
#define OMAP3_ISP_H3A_H

#include <linux/omap3isp.h>

/*
 * ----------
 * -H3A AEWB-
 * ----------
 */

/* Size in bytes of one AEWB output packet (one group of windows) */
#define AEWB_PACKET_SIZE	16
#define AEWB_SATURATION_LIMIT	0x3ff

/* Flags for changed registers */
#define PCR_CHNG		(1 << 0)
#define AEWWIN1_CHNG		(1 << 1)
#define AEWINSTART_CHNG		(1 << 2)
#define AEWINBLK_CHNG		(1 << 3)
#define AEWSUBWIN_CHNG		(1 << 4)
#define PRV_WBDGAIN_CHNG	(1 << 5)
#define PRV_WBGAIN_CHNG		(1 << 6)

/* ISPH3A REGISTERS bits */
#define ISPH3A_PCR_AF_EN	(1 << 0)
#define ISPH3A_PCR_AF_ALAW_EN	(1 << 1)
#define ISPH3A_PCR_AF_MED_EN	(1 << 2)
#define ISPH3A_PCR_AF_BUSY	(1 << 15)
#define ISPH3A_PCR_AEW_EN	(1 << 16)
#define ISPH3A_PCR_AEW_ALAW_EN	(1 << 17)
#define ISPH3A_PCR_AEW_BUSY	(1 << 18)
/*
 * NOTE(review): ISPH3A_PCR_AEW_AVE2LMT_MASK is not defined in this header —
 * presumably it comes from the ISP register definitions header; verify.
 */
#define ISPH3A_PCR_AEW_MASK	(ISPH3A_PCR_AEW_ALAW_EN | \
				 ISPH3A_PCR_AEW_AVE2LMT_MASK)

/*
 * --------
 * -H3A AF-
 * --------
 */

/* Peripheral Revision */
#define AFPID				0x0

#define AFCOEF_OFFSET			0x00000004	/* COEF base address */

/* PCR fields */
#define AF_BUSYAF			(1 << 15)
#define AF_FVMODE			(1 << 14)
#define AF_RGBPOS			(0x7 << 11)
#define AF_MED_TH			(0xFF << 3)
#define AF_MED_EN			(1 << 2)
#define AF_ALAW_EN			(1 << 1)
#define AF_EN				(1 << 0)
/* All AF-related PCR fields except the enable and busy bits */
#define AF_PCR_MASK			(AF_FVMODE | AF_RGBPOS | AF_MED_TH | \
					 AF_MED_EN | AF_ALAW_EN)

/* AFPAX1 fields */
#define AF_PAXW				(0x7F << 16)
#define AF_PAXH				0x7F

/* AFPAX2 fields */
#define AF_AFINCV			(0xF << 13)
#define AF_PAXVC			(0x7F << 6)
#define AF_PAXHC			0x3F

/* AFPAXSTART fields */
#define AF_PAXSH			(0xFFF<<16)
#define AF_PAXSV			0xFFF

/* COEFFICIENT MASK */
#define AF_COEF_MASK0			0xFFF
#define AF_COEF_MASK1			(0xFFF<<16)

/* BIT SHIFTS */
#define AF_RGBPOS_SHIFT			11
#define AF_MED_TH_SHIFT			3
#define AF_PAXW_SHIFT			16
#define AF_LINE_INCR_SHIFT		13
#define AF_VT_COUNT_SHIFT		6
#define AF_HZ_START_SHIFT		16
#define AF_COEF_SHIFT			16

/* Init and cleanup functions */
int omap3isp_h3a_aewb_init(struct isp_device *isp);
int omap3isp_h3a_af_init(struct isp_device *isp);

void omap3isp_h3a_aewb_cleanup(struct isp_device *isp);
void omap3isp_h3a_af_cleanup(struct isp_device *isp);

#endif /* OMAP3_ISP_H3A_H */
+374
drivers/media/video/omap3isp/isph3a_aewb.c
/*
 * isph3a.c
 *
 * TI OMAP3 ISP - H3A module (AEWB part: auto exposure / auto white balance)
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/slab.h>
#include <linux/uaccess.h>

#include "isp.h"
#include "isph3a.h"
#include "ispstat.h"

/*
 * h3a_aewb_setup_regs - Helper function to update h3a registers.
 *
 * Programs the output buffer address every time, and the window/start/
 * blanking/subsampling registers only when a new configuration is pending
 * (aewb->update set). Clears the update flag and latches the buffer size
 * once the configuration has been written.
 */
static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
{
	struct omap3isp_h3a_aewb_config *conf = priv;
	u32 pcr;
	u32 win1;
	u32 start;
	u32 blk;
	u32 subwin;

	/* Nothing to program while the engine is disabled. */
	if (aewb->state == ISPSTAT_DISABLED)
		return;

	/* Point the hardware at the currently active statistics buffer. */
	isp_reg_writel(aewb->isp, aewb->active_buf->iommu_addr,
		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);

	if (!aewb->update)
		return;

	/* Converting config metadata into reg values */
	pcr = conf->saturation_limit << ISPH3A_PCR_AEW_AVE2LMT_SHIFT;
	pcr |= !!conf->alaw_enable << ISPH3A_PCR_AEW_ALAW_EN_SHIFT;

	/* Window sizes are programmed as (size / 2) - 1, counts as count - 1. */
	win1 = ((conf->win_height >> 1) - 1) << ISPH3A_AEWWIN1_WINH_SHIFT;
	win1 |= ((conf->win_width >> 1) - 1) << ISPH3A_AEWWIN1_WINW_SHIFT;
	win1 |= (conf->ver_win_count - 1) << ISPH3A_AEWWIN1_WINVC_SHIFT;
	win1 |= (conf->hor_win_count - 1) << ISPH3A_AEWWIN1_WINHC_SHIFT;

	start = conf->hor_win_start << ISPH3A_AEWINSTART_WINSH_SHIFT;
	start |= conf->ver_win_start << ISPH3A_AEWINSTART_WINSV_SHIFT;

	/* Black row (dark window) position and height. */
	blk = conf->blk_ver_win_start << ISPH3A_AEWINBLK_WINSV_SHIFT;
	blk |= ((conf->blk_win_height >> 1) - 1) << ISPH3A_AEWINBLK_WINH_SHIFT;

	subwin = ((conf->subsample_ver_inc >> 1) - 1) <<
		 ISPH3A_AEWSUBWIN_AEWINCV_SHIFT;
	subwin |= ((conf->subsample_hor_inc >> 1) - 1) <<
		  ISPH3A_AEWSUBWIN_AEWINCH_SHIFT;

	isp_reg_writel(aewb->isp, win1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWWIN1);
	isp_reg_writel(aewb->isp, start, OMAP3_ISP_IOMEM_H3A,
		       ISPH3A_AEWINSTART);
	isp_reg_writel(aewb->isp, blk, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINBLK);
	isp_reg_writel(aewb->isp, subwin, OMAP3_ISP_IOMEM_H3A,
		       ISPH3A_AEWSUBWIN);
	isp_reg_clr_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			ISPH3A_PCR_AEW_MASK, pcr);

	aewb->update = 0;
	aewb->config_counter += aewb->inc_config;
	aewb->inc_config = 0;
	aewb->buf_size = conf->buf_size;
}

/*
 * h3a_aewb_enable - Enable/disable the AEWB engine.
 *
 * The H3A clock is shared with the AF engine, so it is only toggled here
 * when AF is not enabled.
 */
static void h3a_aewb_enable(struct ispstat *aewb, int enable)
{
	if (enable) {
		isp_reg_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			    ISPH3A_PCR_AEW_EN);
		/* This bit is already set if AF is enabled */
		if (aewb->isp->isp_af.state != ISPSTAT_ENABLED)
			isp_reg_set(aewb->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
				    ISPCTRL_H3A_CLK_EN);
	} else {
		isp_reg_clr(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			    ISPH3A_PCR_AEW_EN);
		/* This bit can't be cleared if AF is enabled */
		if (aewb->isp->isp_af.state != ISPSTAT_ENABLED)
			isp_reg_clr(aewb->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
				    ISPCTRL_H3A_CLK_EN);
	}
}

/* h3a_aewb_busy - Return non-zero while the AEWB engine is busy. */
static int h3a_aewb_busy(struct ispstat *aewb)
{
	return isp_reg_readl(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
						& ISPH3A_PCR_BUSYAEAWB;
}

/*
 * h3a_aewb_get_buf_size - Compute the output buffer size for a configuration.
 *
 * Returns the number of bytes the hardware will write for the given window
 * layout, including the extra black-data row and the per-8-windows
 * unsaturated block counts.
 */
static u32 h3a_aewb_get_buf_size(struct omap3isp_h3a_aewb_config *conf)
{
	/* Number of configured windows + extra row for black data */
	u32 win_count = (conf->ver_win_count + 1) * conf->hor_win_count;

	/*
	 * Unsaturated block counts for each 8 windows.
	 * 1 extra for the last (win_count % 8) windows if win_count is not
	 * divisible by 8.
	 */
	win_count += (win_count + 7) / 8;

	return win_count * AEWB_PACKET_SIZE;
}

/*
 * h3a_aewb_validate_params - Bounds-check a user-supplied configuration.
 *
 * Returns 0 on success or -EINVAL if any field is out of the hardware's
 * range. On success, user_cfg->buf_size is raised to the minimum required
 * size, or clamped to the maximum, as a side effect.
 */
static int h3a_aewb_validate_params(struct ispstat *aewb, void *new_conf)
{
	struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
	u32 buf_size;

	if (unlikely(user_cfg->saturation_limit >
		     OMAP3ISP_AEWB_MAX_SATURATION_LIM))
		return -EINVAL;

	/* Window sizes must be even (hardware programs size / 2 - 1). */
	if (unlikely(user_cfg->win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
		     user_cfg->win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
		     user_cfg->win_height & 0x01))
		return -EINVAL;

	if (unlikely(user_cfg->win_width < OMAP3ISP_AEWB_MIN_WIN_W ||
		     user_cfg->win_width > OMAP3ISP_AEWB_MAX_WIN_W ||
		     user_cfg->win_width & 0x01))
		return -EINVAL;

	if (unlikely(user_cfg->ver_win_count < OMAP3ISP_AEWB_MIN_WINVC ||
		     user_cfg->ver_win_count > OMAP3ISP_AEWB_MAX_WINVC))
		return -EINVAL;

	if (unlikely(user_cfg->hor_win_count < OMAP3ISP_AEWB_MIN_WINHC ||
		     user_cfg->hor_win_count > OMAP3ISP_AEWB_MAX_WINHC))
		return -EINVAL;

	if (unlikely(user_cfg->ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
		return -EINVAL;

	if (unlikely(user_cfg->hor_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
		return -EINVAL;

	if (unlikely(user_cfg->blk_ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
		return -EINVAL;

	if (unlikely(user_cfg->blk_win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
		     user_cfg->blk_win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
		     user_cfg->blk_win_height & 0x01))
		return -EINVAL;

	/* Subsampling increments must also be even. */
	if (unlikely(user_cfg->subsample_ver_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
		     user_cfg->subsample_ver_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
		     user_cfg->subsample_ver_inc & 0x01))
		return -EINVAL;

	if (unlikely(user_cfg->subsample_hor_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
		     user_cfg->subsample_hor_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
		     user_cfg->subsample_hor_inc & 0x01))
		return -EINVAL;

	buf_size = h3a_aewb_get_buf_size(user_cfg);
	if (buf_size > user_cfg->buf_size)
		user_cfg->buf_size = buf_size;
	else if (user_cfg->buf_size > OMAP3ISP_AEWB_MAX_BUF_SIZE)
		user_cfg->buf_size = OMAP3ISP_AEWB_MAX_BUF_SIZE;

	return 0;
}

/*
 * h3a_aewb_set_params - Helper function to check & store user given params.
 * @new_conf: Pointer to AE and AWB parameters struct.
 *
 * As most of them are busy-lock registers, need to wait until AEW_BUSY = 0 to
 * program them during ISR.
 */
static void h3a_aewb_set_params(struct ispstat *aewb, void *new_conf)
{
	struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
	struct omap3isp_h3a_aewb_config *cur_cfg = aewb->priv;
	int update = 0;

	/* Copy each field individually, flagging whether anything changed. */
	if (cur_cfg->saturation_limit != user_cfg->saturation_limit) {
		cur_cfg->saturation_limit = user_cfg->saturation_limit;
		update = 1;
	}
	if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
		cur_cfg->alaw_enable = user_cfg->alaw_enable;
		update = 1;
	}
	if (cur_cfg->win_height != user_cfg->win_height) {
		cur_cfg->win_height = user_cfg->win_height;
		update = 1;
	}
	if (cur_cfg->win_width != user_cfg->win_width) {
		cur_cfg->win_width = user_cfg->win_width;
		update = 1;
	}
	if (cur_cfg->ver_win_count != user_cfg->ver_win_count) {
		cur_cfg->ver_win_count = user_cfg->ver_win_count;
		update = 1;
	}
	if (cur_cfg->hor_win_count != user_cfg->hor_win_count) {
		cur_cfg->hor_win_count = user_cfg->hor_win_count;
		update = 1;
	}
	if (cur_cfg->ver_win_start != user_cfg->ver_win_start) {
		cur_cfg->ver_win_start = user_cfg->ver_win_start;
		update = 1;
	}
	if (cur_cfg->hor_win_start != user_cfg->hor_win_start) {
		cur_cfg->hor_win_start = user_cfg->hor_win_start;
		update = 1;
	}
	if (cur_cfg->blk_ver_win_start != user_cfg->blk_ver_win_start) {
		cur_cfg->blk_ver_win_start = user_cfg->blk_ver_win_start;
		update = 1;
	}
	if (cur_cfg->blk_win_height != user_cfg->blk_win_height) {
		cur_cfg->blk_win_height = user_cfg->blk_win_height;
		update = 1;
	}
	if (cur_cfg->subsample_ver_inc != user_cfg->subsample_ver_inc) {
		cur_cfg->subsample_ver_inc = user_cfg->subsample_ver_inc;
		update = 1;
	}
	if (cur_cfg->subsample_hor_inc != user_cfg->subsample_hor_inc) {
		cur_cfg->subsample_hor_inc = user_cfg->subsample_hor_inc;
		update = 1;
	}

	/* A never-configured engine must be programmed even without changes. */
	if (update || !aewb->configured) {
		aewb->inc_config++;
		aewb->update = 1;
		cur_cfg->buf_size = h3a_aewb_get_buf_size(cur_cfg);
	}
}

/* h3a_aewb_ioctl - Dispatch AEWB private V4L2 subdev ioctls. */
static long h3a_aewb_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct ispstat *stat = v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_OMAP3ISP_AEWB_CFG:
		return omap3isp_stat_config(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ:
		return omap3isp_stat_request_statistics(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_EN: {
		/*
		 * NOTE(review): AF's ioctl handler reads the same argument as
		 * int *; confirm the ioctl's declared argument type and make
		 * both handlers consistent.
		 */
		unsigned long *en = arg;
		return omap3isp_stat_enable(stat, !!*en);
	}
	}

	return -ENOIOCTLCMD;
}

static const struct ispstat_ops h3a_aewb_ops = {
	.validate_params	= h3a_aewb_validate_params,
	.set_params		= h3a_aewb_set_params,
	.setup_regs		= h3a_aewb_setup_regs,
	.enable			= h3a_aewb_enable,
	.busy			= h3a_aewb_busy,
};

static const struct v4l2_subdev_core_ops h3a_aewb_subdev_core_ops = {
	.ioctl = h3a_aewb_ioctl,
	.subscribe_event = omap3isp_stat_subscribe_event,
	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops h3a_aewb_subdev_video_ops = {
	.s_stream = omap3isp_stat_s_stream,
};

static const struct v4l2_subdev_ops h3a_aewb_subdev_ops = {
	.core = &h3a_aewb_subdev_core_ops,
	.video = &h3a_aewb_subdev_video_ops,
};

/*
 * omap3isp_h3a_aewb_init - Module Initialisation.
 *
 * Allocates the current and recover configurations (the recover one is a
 * known-good minimal setup used if a user configuration misbehaves) and
 * registers the AEWB statistics subdev. Returns 0 on success or a negative
 * errno; on failure all allocations are released.
 */
int omap3isp_h3a_aewb_init(struct isp_device *isp)
{
	struct ispstat *aewb = &isp->isp_aewb;
	struct omap3isp_h3a_aewb_config *aewb_cfg;
	struct omap3isp_h3a_aewb_config *aewb_recover_cfg;
	int ret;

	aewb_cfg = kzalloc(sizeof(*aewb_cfg), GFP_KERNEL);
	if (!aewb_cfg)
		return -ENOMEM;

	memset(aewb, 0, sizeof(*aewb));
	aewb->ops = &h3a_aewb_ops;
	aewb->priv = aewb_cfg;
	aewb->dma_ch = -1;	/* AEWB does not use DMA readout */
	aewb->event_type = V4L2_EVENT_OMAP3ISP_AEWB;
	aewb->isp = isp;

	/* Set recover state configuration */
	aewb_recover_cfg = kzalloc(sizeof(*aewb_recover_cfg), GFP_KERNEL);
	if (!aewb_recover_cfg) {
		dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
					"recover configuration.\n");
		ret = -ENOMEM;
		goto err_recover_alloc;
	}

	aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
	aewb_recover_cfg->win_height = OMAP3ISP_AEWB_MIN_WIN_H;
	aewb_recover_cfg->win_width = OMAP3ISP_AEWB_MIN_WIN_W;
	aewb_recover_cfg->ver_win_count = OMAP3ISP_AEWB_MIN_WINVC;
	aewb_recover_cfg->hor_win_count = OMAP3ISP_AEWB_MIN_WINHC;
	/* Place the black row just below the last statistics window. */
	aewb_recover_cfg->blk_ver_win_start = aewb_recover_cfg->ver_win_start +
		aewb_recover_cfg->win_height * aewb_recover_cfg->ver_win_count;
	aewb_recover_cfg->blk_win_height = OMAP3ISP_AEWB_MIN_WIN_H;
	aewb_recover_cfg->subsample_ver_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
	aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;

	if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
		dev_err(aewb->isp->dev, "AEWB: recover configuration is "
					"invalid.\n");
		ret = -EINVAL;
		goto err_conf;
	}

	aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
	aewb->recover_priv = aewb_recover_cfg;

	ret = omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);
	if (ret)
		goto err_conf;

	return 0;

err_conf:
	kfree(aewb_recover_cfg);
err_recover_alloc:
	kfree(aewb_cfg);

	return ret;
}

/*
 * omap3isp_h3a_aewb_cleanup - Module exit.
 */
void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
{
	kfree(isp->isp_aewb.priv);
	kfree(isp->isp_aewb.recover_priv);
	omap3isp_stat_free(&isp->isp_aewb);
}
+429
drivers/media/video/omap3isp/isph3a_af.c
/*
 * isph3a_af.c
 *
 * TI OMAP3 ISP - H3A AF module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

/* Linux specific include files */
#include <linux/device.h>
#include <linux/slab.h>

#include "isp.h"
#include "isph3a.h"
#include "ispstat.h"

#define IS_OUT_OF_BOUNDS(value, min, max)		\
	(((value) < (min)) || ((value) > (max)))

/*
 * h3a_af_setup_regs - Program the AF engine registers.
 *
 * Writes the output buffer address every frame, and the paxel geometry,
 * IIR coefficients and PCR fields only when a new configuration is pending
 * (af->update set). Clears the update flag and latches the buffer size.
 */
static void h3a_af_setup_regs(struct ispstat *af, void *priv)
{
	struct omap3isp_h3a_af_config *conf = priv;
	u32 pcr;
	u32 pax1;
	u32 pax2;
	u32 paxstart;
	u32 coef;
	u32 base_coef_set0;
	u32 base_coef_set1;
	int index;

	/* Nothing to program while the engine is disabled. */
	if (af->state == ISPSTAT_DISABLED)
		return;

	/* Point the hardware at the currently active statistics buffer. */
	isp_reg_writel(af->isp, af->active_buf->iommu_addr, OMAP3_ISP_IOMEM_H3A,
		       ISPH3A_AFBUFST);

	if (!af->update)
		return;

	/* Configure Hardware Registers */
	/* Paxel sizes are programmed as (size / 2) - 1. */
	pax1 = ((conf->paxel.width >> 1) - 1) << AF_PAXW_SHIFT;
	/* Set height in AFPAX1 */
	pax1 |= (conf->paxel.height >> 1) - 1;
	isp_reg_writel(af->isp, pax1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1);

	/* Configure AFPAX2 Register */
	/* Set Line Increment in AFPAX2 Register */
	pax2 = ((conf->paxel.line_inc >> 1) - 1) << AF_LINE_INCR_SHIFT;
	/* Set Vertical Count */
	pax2 |= (conf->paxel.v_cnt - 1) << AF_VT_COUNT_SHIFT;
	/* Set Horizontal Count */
	pax2 |= (conf->paxel.h_cnt - 1);
	isp_reg_writel(af->isp, pax2, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2);

	/* Configure PAXSTART Register */
	/*Configure Horizontal Start */
	paxstart = conf->paxel.h_start << AF_HZ_START_SHIFT;
	/* Configure Vertical Start */
	paxstart |= conf->paxel.v_start;
	isp_reg_writel(af->isp, paxstart, OMAP3_ISP_IOMEM_H3A,
		       ISPH3A_AFPAXSTART);

	/*SetIIRSH Register */
	isp_reg_writel(af->isp, conf->iir.h_start,
		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH);

	/*
	 * Coefficients are packed two per register; walk both coefficient
	 * sets in parallel, two entries per iteration.
	 */
	base_coef_set0 = ISPH3A_AFCOEF010;
	base_coef_set1 = ISPH3A_AFCOEF110;
	for (index = 0; index <= 8; index += 2) {
		/*Set IIR Filter0 Coefficients */
		coef = 0;
		coef |= conf->iir.coeff_set0[index];
		coef |= conf->iir.coeff_set0[index + 1] <<
			AF_COEF_SHIFT;
		isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
			       base_coef_set0);
		base_coef_set0 += AFCOEF_OFFSET;

		/*Set IIR Filter1 Coefficients */
		coef = 0;
		coef |= conf->iir.coeff_set1[index];
		coef |= conf->iir.coeff_set1[index + 1] <<
			AF_COEF_SHIFT;
		isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A,
			       base_coef_set1);
		base_coef_set1 += AFCOEF_OFFSET;
	}
	/* set AFCOEF0010 Register */
	isp_reg_writel(af->isp, conf->iir.coeff_set0[10],
		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010);
	/* set AFCOEF1010 Register */
	isp_reg_writel(af->isp, conf->iir.coeff_set1[10],
		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010);

	/* PCR Register */
	/* Set RGB Position */
	pcr = conf->rgb_pos << AF_RGBPOS_SHIFT;
	/* Set Accumulator Mode */
	if (conf->fvmode == OMAP3ISP_AF_MODE_PEAK)
		pcr |= AF_FVMODE;
	/* Set A-law */
	if (conf->alaw_enable)
		pcr |= AF_ALAW_EN;
	/* HMF Configurations */
	if (conf->hmf.enable) {
		/* Enable HMF */
		pcr |= AF_MED_EN;
		/* Set Median Threshold */
		pcr |= conf->hmf.threshold << AF_MED_TH_SHIFT;
	}
	/* Set PCR Register */
	isp_reg_clr_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			AF_PCR_MASK, pcr);

	af->update = 0;
	af->config_counter += af->inc_config;
	af->inc_config = 0;
	af->buf_size = conf->buf_size;
}

/*
 * h3a_af_enable - Enable/disable the AF engine.
 *
 * The H3A clock is shared with the AEWB engine, so it is only toggled here
 * when AEWB is not enabled.
 */
static void h3a_af_enable(struct ispstat *af, int enable)
{
	if (enable) {
		isp_reg_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			    ISPH3A_PCR_AF_EN);
		/* This bit is already set if AEWB is enabled */
		if (af->isp->isp_aewb.state != ISPSTAT_ENABLED)
			isp_reg_set(af->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
				    ISPCTRL_H3A_CLK_EN);
	} else {
		isp_reg_clr(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			    ISPH3A_PCR_AF_EN);
		/* This bit can't be cleared if AEWB is enabled */
		if (af->isp->isp_aewb.state != ISPSTAT_ENABLED)
			isp_reg_clr(af->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
				    ISPCTRL_H3A_CLK_EN);
	}
}

/* h3a_af_busy - Return non-zero while the AF engine is busy. */
static int h3a_af_busy(struct ispstat *af)
{
	return isp_reg_readl(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
						& ISPH3A_PCR_BUSYAF;
}

/* h3a_af_get_buf_size - Output buffer size in bytes for a configuration. */
static u32 h3a_af_get_buf_size(struct omap3isp_h3a_af_config *conf)
{
	return conf->paxel.h_cnt * conf->paxel.v_cnt * OMAP3ISP_AF_PAXEL_SIZE;
}

/*
 * h3a_af_validate_params - Function to check paxel parameters.
 *
 * Returns 0 on success or -EINVAL if any paxel or IIR field is out of the
 * hardware's range. On success, user_cfg->buf_size is raised to the minimum
 * required size, or clamped to the maximum, as a side effect.
 */
static int h3a_af_validate_params(struct ispstat *af, void *new_conf)
{
	struct omap3isp_h3a_af_config *user_cfg = new_conf;
	struct omap3isp_h3a_af_paxel *paxel_cfg = &user_cfg->paxel;
	struct omap3isp_h3a_af_iir *iir_cfg = &user_cfg->iir;
	int index;
	u32 buf_size;

	/* Check horizontal Count */
	if (IS_OUT_OF_BOUNDS(paxel_cfg->h_cnt,
			     OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN,
			     OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MAX))
		return -EINVAL;

	/* Check Vertical Count */
	if (IS_OUT_OF_BOUNDS(paxel_cfg->v_cnt,
			     OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN,
			     OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MAX))
		return -EINVAL;

	/* Height and width must be even (programmed as size / 2 - 1). */
	if (IS_OUT_OF_BOUNDS(paxel_cfg->height, OMAP3ISP_AF_PAXEL_HEIGHT_MIN,
			     OMAP3ISP_AF_PAXEL_HEIGHT_MAX) ||
	    paxel_cfg->height % 2)
		return -EINVAL;

	/* Check width */
	if (IS_OUT_OF_BOUNDS(paxel_cfg->width, OMAP3ISP_AF_PAXEL_WIDTH_MIN,
			     OMAP3ISP_AF_PAXEL_WIDTH_MAX) ||
	    paxel_cfg->width % 2)
		return -EINVAL;

	/* Check Line Increment */
	if (IS_OUT_OF_BOUNDS(paxel_cfg->line_inc,
			     OMAP3ISP_AF_PAXEL_INCREMENT_MIN,
			     OMAP3ISP_AF_PAXEL_INCREMENT_MAX) ||
	    paxel_cfg->line_inc % 2)
		return -EINVAL;

	/* Check Horizontal Start */
	if ((paxel_cfg->h_start < iir_cfg->h_start) ||
	    IS_OUT_OF_BOUNDS(paxel_cfg->h_start,
			     OMAP3ISP_AF_PAXEL_HZSTART_MIN,
			     OMAP3ISP_AF_PAXEL_HZSTART_MAX))
		return -EINVAL;

	/* Check IIR */
	for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
		if ((iir_cfg->coeff_set0[index]) > OMAP3ISP_AF_COEF_MAX)
			return -EINVAL;

		if ((iir_cfg->coeff_set1[index]) > OMAP3ISP_AF_COEF_MAX)
			return -EINVAL;
	}

	if (IS_OUT_OF_BOUNDS(iir_cfg->h_start, OMAP3ISP_AF_IIRSH_MIN,
			     OMAP3ISP_AF_IIRSH_MAX))
		return -EINVAL;

	/* Hack: If paxel size is 12, the 10th AF window may be corrupted */
	if ((paxel_cfg->h_cnt * paxel_cfg->v_cnt > 9) &&
	    (paxel_cfg->width * paxel_cfg->height == 12))
		return -EINVAL;

	buf_size = h3a_af_get_buf_size(user_cfg);
	if (buf_size > user_cfg->buf_size)
		/* User buf_size request wasn't enough */
		user_cfg->buf_size = buf_size;
	else if (user_cfg->buf_size > OMAP3ISP_AF_MAX_BUF_SIZE)
		user_cfg->buf_size = OMAP3ISP_AF_MAX_BUF_SIZE;

	return 0;
}

/*
 * h3a_af_set_params - Update local parameters.
 *
 * Compares the incoming configuration field-by-field with the current one;
 * on the first difference the whole struct is copied and a hardware update
 * is scheduled for the next ISR.
 */
static void h3a_af_set_params(struct ispstat *af, void *new_conf)
{
	struct omap3isp_h3a_af_config *user_cfg = new_conf;
	struct omap3isp_h3a_af_config *cur_cfg = af->priv;
	int update = 0;
	int index;

	/* alaw */
	if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
		update = 1;
		goto out;
	}

	/* hmf */
	if (cur_cfg->hmf.enable != user_cfg->hmf.enable) {
		update = 1;
		goto out;
	}
	if (cur_cfg->hmf.threshold != user_cfg->hmf.threshold) {
		update = 1;
		goto out;
	}

	/* rgbpos */
	if (cur_cfg->rgb_pos != user_cfg->rgb_pos) {
		update = 1;
		goto out;
	}

	/* iir */
	if (cur_cfg->iir.h_start != user_cfg->iir.h_start) {
		update = 1;
		goto out;
	}
	for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) {
		if (cur_cfg->iir.coeff_set0[index] !=
		    user_cfg->iir.coeff_set0[index]) {
			update = 1;
			goto out;
		}
		if (cur_cfg->iir.coeff_set1[index] !=
		    user_cfg->iir.coeff_set1[index]) {
			update = 1;
			goto out;
		}
	}

	/* paxel */
	if ((cur_cfg->paxel.width != user_cfg->paxel.width) ||
	    (cur_cfg->paxel.height != user_cfg->paxel.height) ||
	    (cur_cfg->paxel.h_start != user_cfg->paxel.h_start) ||
	    (cur_cfg->paxel.v_start != user_cfg->paxel.v_start) ||
	    (cur_cfg->paxel.h_cnt != user_cfg->paxel.h_cnt) ||
	    (cur_cfg->paxel.v_cnt != user_cfg->paxel.v_cnt) ||
	    (cur_cfg->paxel.line_inc != user_cfg->paxel.line_inc)) {
		update = 1;
		goto out;
	}

	/* af_mode */
	if (cur_cfg->fvmode != user_cfg->fvmode)
		update = 1;

out:
	/* A never-configured engine must be programmed even without changes. */
	if (update || !af->configured) {
		memcpy(cur_cfg, user_cfg, sizeof(*cur_cfg));
		af->inc_config++;
		af->update = 1;
		/*
		 * User might be asked for a bigger buffer than necessary for
		 * this configuration. In order to return the right amount of
		 * data during buffer request, let's calculate the size here
		 * instead of stick with user_cfg->buf_size.
		 */
		cur_cfg->buf_size = h3a_af_get_buf_size(cur_cfg);
	}
}

/* h3a_af_ioctl - Dispatch AF private V4L2 subdev ioctls. */
static long h3a_af_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct ispstat *stat = v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_OMAP3ISP_AF_CFG:
		return omap3isp_stat_config(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ:
		return omap3isp_stat_request_statistics(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_EN: {
		/*
		 * NOTE(review): AEWB's ioctl handler reads the same argument
		 * as unsigned long *; confirm the ioctl's declared argument
		 * type and make both handlers consistent.
		 */
		int *en = arg;
		return omap3isp_stat_enable(stat, !!*en);
	}
	}

	return -ENOIOCTLCMD;

}

static const struct ispstat_ops h3a_af_ops = {
	.validate_params	= h3a_af_validate_params,
	.set_params		= h3a_af_set_params,
	.setup_regs		= h3a_af_setup_regs,
	.enable			= h3a_af_enable,
	.busy			= h3a_af_busy,
};

static const struct v4l2_subdev_core_ops h3a_af_subdev_core_ops = {
	.ioctl = h3a_af_ioctl,
	.subscribe_event = omap3isp_stat_subscribe_event,
	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops h3a_af_subdev_video_ops = {
	.s_stream = omap3isp_stat_s_stream,
};

static const struct v4l2_subdev_ops h3a_af_subdev_ops = {
	.core = &h3a_af_subdev_core_ops,
	.video = &h3a_af_subdev_video_ops,
};

/*
 * omap3isp_h3a_af_init - Function to register the AF character device driver.
 *
 * Allocates the current and recover configurations (the recover one is a
 * known-good minimal setup used if a user configuration misbehaves) and
 * registers the AF statistics subdev. Returns 0 on success or a negative
 * errno; on failure all allocations are released.
 */
int omap3isp_h3a_af_init(struct isp_device *isp)
{
	struct ispstat *af = &isp->isp_af;
	struct omap3isp_h3a_af_config *af_cfg;
	struct omap3isp_h3a_af_config *af_recover_cfg;
	int ret;

	af_cfg = kzalloc(sizeof(*af_cfg), GFP_KERNEL);
	if (af_cfg == NULL)
		return -ENOMEM;

	memset(af, 0, sizeof(*af));
	af->ops = &h3a_af_ops;
	af->priv = af_cfg;
	af->dma_ch = -1;	/* AF does not use DMA readout */
	af->event_type = V4L2_EVENT_OMAP3ISP_AF;
	af->isp = isp;

	/* Set recover state configuration */
	af_recover_cfg = kzalloc(sizeof(*af_recover_cfg), GFP_KERNEL);
	if (!af_recover_cfg) {
		dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
				      "configuration.\n");
		ret = -ENOMEM;
		goto err_recover_alloc;
	}

	af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
	af_recover_cfg->paxel.width = OMAP3ISP_AF_PAXEL_WIDTH_MIN;
	af_recover_cfg->paxel.height = OMAP3ISP_AF_PAXEL_HEIGHT_MIN;
	af_recover_cfg->paxel.h_cnt = OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN;
	af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
	af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;
	if (h3a_af_validate_params(af, af_recover_cfg)) {
		dev_err(af->isp->dev, "AF: recover configuration is "
				      "invalid.\n");
		ret = -EINVAL;
		goto err_conf;
	}

	af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
	af->recover_priv = af_recover_cfg;

	ret = omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);
	if (ret)
		goto err_conf;

	return 0;

err_conf:
	kfree(af_recover_cfg);
err_recover_alloc:
	kfree(af_cfg);

	return ret;
}

/* omap3isp_h3a_af_cleanup - Module exit; frees configs and the stat subdev. */
void omap3isp_h3a_af_cleanup(struct isp_device *isp)
{
	kfree(isp->isp_af.priv);
	kfree(isp->isp_af.recover_priv);
	omap3isp_stat_free(&isp->isp_af);
}
+520
drivers/media/video/omap3isp/isphist.c
··· 1 + /* 2 + * isphist.c 3 + * 4 + * TI OMAP3 ISP - Histogram module 5 + * 6 + * Copyright (C) 2010 Nokia Corporation 7 + * Copyright (C) 2009 Texas Instruments, Inc. 8 + * 9 + * Contacts: David Cohen <dacohen@gmail.com> 10 + * Laurent Pinchart <laurent.pinchart@ideasonboard.com> 11 + * Sakari Ailus <sakari.ailus@iki.fi> 12 + * 13 + * This program is free software; you can redistribute it and/or modify 14 + * it under the terms of the GNU General Public License version 2 as 15 + * published by the Free Software Foundation. 16 + * 17 + * This program is distributed in the hope that it will be useful, but 18 + * WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 + * General Public License for more details. 21 + * 22 + * You should have received a copy of the GNU General Public License 23 + * along with this program; if not, write to the Free Software 24 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 25 + * 02110-1301 USA 26 + */ 27 + 28 + #include <linux/delay.h> 29 + #include <linux/slab.h> 30 + #include <linux/uaccess.h> 31 + #include <linux/device.h> 32 + 33 + #include "isp.h" 34 + #include "ispreg.h" 35 + #include "isphist.h" 36 + 37 + #define HIST_CONFIG_DMA 1 38 + 39 + #define HIST_USING_DMA(hist) ((hist)->dma_ch >= 0) 40 + 41 + /* 42 + * hist_reset_mem - clear Histogram memory before start stats engine. 43 + */ 44 + static void hist_reset_mem(struct ispstat *hist) 45 + { 46 + struct isp_device *isp = hist->isp; 47 + struct omap3isp_hist_config *conf = hist->priv; 48 + unsigned int i; 49 + 50 + isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); 51 + 52 + /* 53 + * By setting it, the histogram internal buffer is being cleared at the 54 + * same time it's being read. This bit must be cleared afterwards. 
55 + */ 56 + isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); 57 + 58 + /* 59 + * We'll clear 4 words at each iteration for optimization. It avoids 60 + * 3/4 of the jumps. We also know HIST_MEM_SIZE is divisible by 4. 61 + */ 62 + for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) { 63 + isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 64 + isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 65 + isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 66 + isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 67 + } 68 + isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); 69 + 70 + hist->wait_acc_frames = conf->num_acc_frames; 71 + } 72 + 73 + static void hist_dma_config(struct ispstat *hist) 74 + { 75 + hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32; 76 + hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT; 77 + hist->dma_config.frame_count = 1; 78 + hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT; 79 + hist->dma_config.src_start = OMAP3ISP_HIST_REG_BASE + ISPHIST_DATA; 80 + hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC; 81 + hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC; 82 + } 83 + 84 + /* 85 + * hist_setup_regs - Helper function to update Histogram registers. 
86 + */ 87 + static void hist_setup_regs(struct ispstat *hist, void *priv) 88 + { 89 + struct isp_device *isp = hist->isp; 90 + struct omap3isp_hist_config *conf = priv; 91 + int c; 92 + u32 cnt; 93 + u32 wb_gain; 94 + u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS]; 95 + u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS]; 96 + 97 + if (!hist->update || hist->state == ISPSTAT_DISABLED || 98 + hist->state == ISPSTAT_DISABLING) 99 + return; 100 + 101 + cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT; 102 + 103 + wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT; 104 + wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT; 105 + wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT; 106 + if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER) 107 + wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT; 108 + 109 + /* Regions size and position */ 110 + for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) { 111 + if (c < conf->num_regions) { 112 + reg_hor[c] = conf->region[c].h_start << 113 + ISPHIST_REG_START_SHIFT; 114 + reg_hor[c] = conf->region[c].h_end << 115 + ISPHIST_REG_END_SHIFT; 116 + reg_ver[c] = conf->region[c].v_start << 117 + ISPHIST_REG_START_SHIFT; 118 + reg_ver[c] = conf->region[c].v_end << 119 + ISPHIST_REG_END_SHIFT; 120 + } else { 121 + reg_hor[c] = 0; 122 + reg_ver[c] = 0; 123 + } 124 + } 125 + 126 + cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT; 127 + switch (conf->hist_bins) { 128 + case OMAP3ISP_HIST_BINS_256: 129 + cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) << 130 + ISPHIST_CNT_SHIFT_SHIFT; 131 + break; 132 + case OMAP3ISP_HIST_BINS_128: 133 + cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) << 134 + ISPHIST_CNT_SHIFT_SHIFT; 135 + break; 136 + case OMAP3ISP_HIST_BINS_64: 137 + cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) << 138 + ISPHIST_CNT_SHIFT_SHIFT; 139 + break; 140 + default: /* OMAP3ISP_HIST_BINS_32 */ 141 + cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) << 142 + ISPHIST_CNT_SHIFT_SHIFT; 143 + break; 144 + } 145 + 146 + hist_reset_mem(hist); 147 + 148 + isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, 
ISPHIST_CNT); 149 + isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN); 150 + isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ); 151 + isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT); 152 + isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ); 153 + isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT); 154 + isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ); 155 + isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT); 156 + isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ); 157 + isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT); 158 + 159 + hist->update = 0; 160 + hist->config_counter += hist->inc_config; 161 + hist->inc_config = 0; 162 + hist->buf_size = conf->buf_size; 163 + } 164 + 165 + static void hist_enable(struct ispstat *hist, int enable) 166 + { 167 + if (enable) { 168 + isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR, 169 + ISPHIST_PCR_ENABLE); 170 + isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 171 + ISPCTRL_HIST_CLK_EN); 172 + } else { 173 + isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR, 174 + ISPHIST_PCR_ENABLE); 175 + isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 176 + ISPCTRL_HIST_CLK_EN); 177 + } 178 + } 179 + 180 + static int hist_busy(struct ispstat *hist) 181 + { 182 + return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR) 183 + & ISPHIST_PCR_BUSY; 184 + } 185 + 186 + static void hist_dma_cb(int lch, u16 ch_status, void *data) 187 + { 188 + struct ispstat *hist = data; 189 + 190 + if (ch_status & ~OMAP_DMA_BLOCK_IRQ) { 191 + dev_dbg(hist->isp->dev, "hist: DMA error. 
status = 0x%04x\n", 192 + ch_status); 193 + omap_stop_dma(lch); 194 + hist_reset_mem(hist); 195 + atomic_set(&hist->buf_err, 1); 196 + } 197 + isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, 198 + ISPHIST_CNT_CLEAR); 199 + 200 + omap3isp_stat_dma_isr(hist); 201 + if (hist->state != ISPSTAT_DISABLED) 202 + omap3isp_hist_dma_done(hist->isp); 203 + } 204 + 205 + static int hist_buf_dma(struct ispstat *hist) 206 + { 207 + dma_addr_t dma_addr = hist->active_buf->dma_addr; 208 + 209 + if (unlikely(!dma_addr)) { 210 + dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n"); 211 + hist_reset_mem(hist); 212 + return STAT_NO_BUF; 213 + } 214 + 215 + isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); 216 + isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, 217 + ISPHIST_CNT_CLEAR); 218 + omap3isp_flush(hist->isp); 219 + hist->dma_config.dst_start = dma_addr; 220 + hist->dma_config.elem_count = hist->buf_size / sizeof(u32); 221 + omap_set_dma_params(hist->dma_ch, &hist->dma_config); 222 + 223 + omap_start_dma(hist->dma_ch); 224 + 225 + return STAT_BUF_WAITING_DMA; 226 + } 227 + 228 + static int hist_buf_pio(struct ispstat *hist) 229 + { 230 + struct isp_device *isp = hist->isp; 231 + u32 *buf = hist->active_buf->virt_addr; 232 + unsigned int i; 233 + 234 + if (!buf) { 235 + dev_dbg(isp->dev, "hist: invalid PIO buffer address\n"); 236 + hist_reset_mem(hist); 237 + return STAT_NO_BUF; 238 + } 239 + 240 + isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); 241 + 242 + /* 243 + * By setting it, the histogram internal buffer is being cleared at the 244 + * same time it's being read. This bit must be cleared just after all 245 + * data is acquired. 246 + */ 247 + isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); 248 + 249 + /* 250 + * We'll read 4 times a 4-bytes-word at each iteration for 251 + * optimization. It avoids 3/4 of the jumps. We also know buf_size is 252 + * divisible by 16. 
253 + */ 254 + for (i = hist->buf_size / 16; i > 0; i--) { 255 + *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 256 + *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 257 + *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 258 + *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); 259 + } 260 + isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, 261 + ISPHIST_CNT_CLEAR); 262 + 263 + return STAT_BUF_DONE; 264 + } 265 + 266 + /* 267 + * hist_buf_process - Callback from ISP driver for HIST interrupt. 268 + */ 269 + static int hist_buf_process(struct ispstat *hist) 270 + { 271 + struct omap3isp_hist_config *user_cfg = hist->priv; 272 + int ret; 273 + 274 + if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) { 275 + hist_reset_mem(hist); 276 + return STAT_NO_BUF; 277 + } 278 + 279 + if (--(hist->wait_acc_frames)) 280 + return STAT_NO_BUF; 281 + 282 + if (HIST_USING_DMA(hist)) 283 + ret = hist_buf_dma(hist); 284 + else 285 + ret = hist_buf_pio(hist); 286 + 287 + hist->wait_acc_frames = user_cfg->num_acc_frames; 288 + 289 + return ret; 290 + } 291 + 292 + static u32 hist_get_buf_size(struct omap3isp_hist_config *conf) 293 + { 294 + return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions; 295 + } 296 + 297 + /* 298 + * hist_validate_params - Helper function to check user given params. 299 + * @user_cfg: Pointer to user configuration structure. 300 + * 301 + * Returns 0 on success configuration. 
302 + */ 303 + static int hist_validate_params(struct ispstat *hist, void *new_conf) 304 + { 305 + struct omap3isp_hist_config *user_cfg = new_conf; 306 + int c; 307 + u32 buf_size; 308 + 309 + if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3) 310 + return -EINVAL; 311 + 312 + /* Regions size and position */ 313 + 314 + if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) || 315 + (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS)) 316 + return -EINVAL; 317 + 318 + /* Regions */ 319 + for (c = 0; c < user_cfg->num_regions; c++) { 320 + if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK) 321 + return -EINVAL; 322 + if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK) 323 + return -EINVAL; 324 + if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK) 325 + return -EINVAL; 326 + if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK) 327 + return -EINVAL; 328 + if (user_cfg->region[c].h_start > user_cfg->region[c].h_end) 329 + return -EINVAL; 330 + if (user_cfg->region[c].v_start > user_cfg->region[c].v_end) 331 + return -EINVAL; 332 + } 333 + 334 + switch (user_cfg->num_regions) { 335 + case 1: 336 + if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256) 337 + return -EINVAL; 338 + break; 339 + case 2: 340 + if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128) 341 + return -EINVAL; 342 + break; 343 + default: /* 3 or 4 */ 344 + if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64) 345 + return -EINVAL; 346 + break; 347 + } 348 + 349 + buf_size = hist_get_buf_size(user_cfg); 350 + if (buf_size > user_cfg->buf_size) 351 + /* User's buf_size request wasn't enoght */ 352 + user_cfg->buf_size = buf_size; 353 + else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE) 354 + user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE; 355 + 356 + return 0; 357 + } 358 + 359 + static int hist_comp_params(struct ispstat *hist, 360 + struct omap3isp_hist_config *user_cfg) 361 + { 362 + struct omap3isp_hist_config *cur_cfg = hist->priv; 363 + int c; 364 + 365 + if 
(cur_cfg->cfa != user_cfg->cfa) 366 + return 1; 367 + 368 + if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames) 369 + return 1; 370 + 371 + if (cur_cfg->hist_bins != user_cfg->hist_bins) 372 + return 1; 373 + 374 + for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) { 375 + if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3) 376 + break; 377 + else if (cur_cfg->wg[c] != user_cfg->wg[c]) 378 + return 1; 379 + } 380 + 381 + if (cur_cfg->num_regions != user_cfg->num_regions) 382 + return 1; 383 + 384 + /* Regions */ 385 + for (c = 0; c < user_cfg->num_regions; c++) { 386 + if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start) 387 + return 1; 388 + if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end) 389 + return 1; 390 + if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start) 391 + return 1; 392 + if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end) 393 + return 1; 394 + } 395 + 396 + return 0; 397 + } 398 + 399 + /* 400 + * hist_update_params - Helper function to check and store user given params. 401 + * @new_conf: Pointer to user configuration structure. 402 + */ 403 + static void hist_set_params(struct ispstat *hist, void *new_conf) 404 + { 405 + struct omap3isp_hist_config *user_cfg = new_conf; 406 + struct omap3isp_hist_config *cur_cfg = hist->priv; 407 + 408 + if (!hist->configured || hist_comp_params(hist, user_cfg)) { 409 + memcpy(cur_cfg, user_cfg, sizeof(*user_cfg)); 410 + if (user_cfg->num_acc_frames == 0) 411 + user_cfg->num_acc_frames = 1; 412 + hist->inc_config++; 413 + hist->update = 1; 414 + /* 415 + * User might be asked for a bigger buffer than necessary for 416 + * this configuration. In order to return the right amount of 417 + * data during buffer request, let's calculate the size here 418 + * instead of stick with user_cfg->buf_size. 
419 + */ 420 + cur_cfg->buf_size = hist_get_buf_size(cur_cfg); 421 + 422 + } 423 + } 424 + 425 + static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) 426 + { 427 + struct ispstat *stat = v4l2_get_subdevdata(sd); 428 + 429 + switch (cmd) { 430 + case VIDIOC_OMAP3ISP_HIST_CFG: 431 + return omap3isp_stat_config(stat, arg); 432 + case VIDIOC_OMAP3ISP_STAT_REQ: 433 + return omap3isp_stat_request_statistics(stat, arg); 434 + case VIDIOC_OMAP3ISP_STAT_EN: { 435 + int *en = arg; 436 + return omap3isp_stat_enable(stat, !!*en); 437 + } 438 + } 439 + 440 + return -ENOIOCTLCMD; 441 + 442 + } 443 + 444 + static const struct ispstat_ops hist_ops = { 445 + .validate_params = hist_validate_params, 446 + .set_params = hist_set_params, 447 + .setup_regs = hist_setup_regs, 448 + .enable = hist_enable, 449 + .busy = hist_busy, 450 + .buf_process = hist_buf_process, 451 + }; 452 + 453 + static const struct v4l2_subdev_core_ops hist_subdev_core_ops = { 454 + .ioctl = hist_ioctl, 455 + .subscribe_event = omap3isp_stat_subscribe_event, 456 + .unsubscribe_event = omap3isp_stat_unsubscribe_event, 457 + }; 458 + 459 + static const struct v4l2_subdev_video_ops hist_subdev_video_ops = { 460 + .s_stream = omap3isp_stat_s_stream, 461 + }; 462 + 463 + static const struct v4l2_subdev_ops hist_subdev_ops = { 464 + .core = &hist_subdev_core_ops, 465 + .video = &hist_subdev_video_ops, 466 + }; 467 + 468 + /* 469 + * omap3isp_hist_init - Module Initialization. 
470 + */ 471 + int omap3isp_hist_init(struct isp_device *isp) 472 + { 473 + struct ispstat *hist = &isp->isp_hist; 474 + struct omap3isp_hist_config *hist_cfg; 475 + int ret = -1; 476 + 477 + hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL); 478 + if (hist_cfg == NULL) 479 + return -ENOMEM; 480 + 481 + memset(hist, 0, sizeof(*hist)); 482 + if (HIST_CONFIG_DMA) 483 + ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST", 484 + hist_dma_cb, hist, &hist->dma_ch); 485 + if (ret) { 486 + if (HIST_CONFIG_DMA) 487 + dev_warn(isp->dev, "hist: DMA request channel failed. " 488 + "Using PIO only.\n"); 489 + hist->dma_ch = -1; 490 + } else { 491 + dev_dbg(isp->dev, "hist: DMA channel = %d\n", hist->dma_ch); 492 + hist_dma_config(hist); 493 + omap_enable_dma_irq(hist->dma_ch, OMAP_DMA_BLOCK_IRQ); 494 + } 495 + 496 + hist->ops = &hist_ops; 497 + hist->priv = hist_cfg; 498 + hist->event_type = V4L2_EVENT_OMAP3ISP_HIST; 499 + hist->isp = isp; 500 + 501 + ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops); 502 + if (ret) { 503 + kfree(hist_cfg); 504 + if (HIST_USING_DMA(hist)) 505 + omap_free_dma(hist->dma_ch); 506 + } 507 + 508 + return ret; 509 + } 510 + 511 + /* 512 + * omap3isp_hist_cleanup - Module cleanup. 513 + */ 514 + void omap3isp_hist_cleanup(struct isp_device *isp) 515 + { 516 + if (HIST_USING_DMA(&isp->isp_hist)) 517 + omap_free_dma(isp->isp_hist.dma_ch); 518 + kfree(isp->isp_hist.priv); 519 + omap3isp_stat_free(&isp->isp_hist); 520 + }
+40
drivers/media/video/omap3isp/isphist.h
··· 1 + /* 2 + * isphist.h 3 + * 4 + * TI OMAP3 ISP - Histogram module 5 + * 6 + * Copyright (C) 2010 Nokia Corporation 7 + * Copyright (C) 2009 Texas Instruments, Inc. 8 + * 9 + * Contacts: David Cohen <dacohen@gmail.com> 10 + * Laurent Pinchart <laurent.pinchart@ideasonboard.com> 11 + * Sakari Ailus <sakari.ailus@iki.fi> 12 + * 13 + * This program is free software; you can redistribute it and/or modify 14 + * it under the terms of the GNU General Public License version 2 as 15 + * published by the Free Software Foundation. 16 + * 17 + * This program is distributed in the hope that it will be useful, but 18 + * WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 + * General Public License for more details. 21 + * 22 + * You should have received a copy of the GNU General Public License 23 + * along with this program; if not, write to the Free Software 24 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 25 + * 02110-1301 USA 26 + */ 27 + 28 + #ifndef OMAP3_ISP_HIST_H 29 + #define OMAP3_ISP_HIST_H 30 + 31 + #include <linux/omap3isp.h> 32 + 33 + #define ISPHIST_IN_BIT_WIDTH_CCDC 10 34 + 35 + struct isp_device; 36 + 37 + int omap3isp_hist_init(struct isp_device *isp); 38 + void omap3isp_hist_cleanup(struct isp_device *isp); 39 + 40 + #endif /* OMAP3_ISP_HIST */
+1092
drivers/media/video/omap3isp/ispstat.c
··· 1 + /* 2 + * ispstat.c 3 + * 4 + * TI OMAP3 ISP - Statistics core 5 + * 6 + * Copyright (C) 2010 Nokia Corporation 7 + * Copyright (C) 2009 Texas Instruments, Inc 8 + * 9 + * Contacts: David Cohen <dacohen@gmail.com> 10 + * Laurent Pinchart <laurent.pinchart@ideasonboard.com> 11 + * Sakari Ailus <sakari.ailus@iki.fi> 12 + * 13 + * This program is free software; you can redistribute it and/or modify 14 + * it under the terms of the GNU General Public License version 2 as 15 + * published by the Free Software Foundation. 16 + * 17 + * This program is distributed in the hope that it will be useful, but 18 + * WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 + * General Public License for more details. 21 + * 22 + * You should have received a copy of the GNU General Public License 23 + * along with this program; if not, write to the Free Software 24 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 25 + * 02110-1301 USA 26 + */ 27 + 28 + #include <linux/dma-mapping.h> 29 + #include <linux/slab.h> 30 + #include <linux/uaccess.h> 31 + 32 + #include "isp.h" 33 + 34 + #define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0) 35 + 36 + /* 37 + * MAGIC_SIZE must always be the greatest common divisor of 38 + * AEWB_PACKET_SIZE and AF_PAXEL_SIZE. 39 + */ 40 + #define MAGIC_SIZE 16 41 + #define MAGIC_NUM 0x55 42 + 43 + /* HACK: AF module seems to be writing one more paxel data than it should. */ 44 + #define AF_EXTRA_DATA OMAP3ISP_AF_PAXEL_SIZE 45 + 46 + /* 47 + * HACK: H3A modules go to an invalid state after have a SBL overflow. It makes 48 + * the next buffer to start to be written in the same point where the overflow 49 + * occurred instead of the configured address. The only known way to make it to 50 + * go back to a valid state is having a valid buffer processing. Of course it 51 + * requires at least a doubled buffer size to avoid an access to invalid memory 52 + * region. 
But it does not fix everything. It may happen more than one 53 + * consecutive SBL overflows. In that case, it might be unpredictable how many 54 + * buffers the allocated memory should fit. For that case, a recover 55 + * configuration was created. It produces the minimum buffer size for each H3A 56 + * module and decrease the change for more SBL overflows. This recover state 57 + * will be enabled every time a SBL overflow occur. As the output buffer size 58 + * isn't big, it's possible to have an extra size able to fit many recover 59 + * buffers making it extreamily unlikely to have an access to invalid memory 60 + * region. 61 + */ 62 + #define NUM_H3A_RECOVER_BUFS 10 63 + 64 + /* 65 + * HACK: Because of HW issues the generic layer sometimes need to have 66 + * different behaviour for different statistic modules. 67 + */ 68 + #define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af) 69 + #define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb) 70 + #define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat)) 71 + 72 + static void __isp_stat_buf_sync_magic(struct ispstat *stat, 73 + struct ispstat_buffer *buf, 74 + u32 buf_size, enum dma_data_direction dir, 75 + void (*dma_sync)(struct device *, 76 + dma_addr_t, unsigned long, size_t, 77 + enum dma_data_direction)) 78 + { 79 + struct device *dev = stat->isp->dev; 80 + struct page *pg; 81 + dma_addr_t dma_addr; 82 + u32 offset; 83 + 84 + /* Initial magic words */ 85 + pg = vmalloc_to_page(buf->virt_addr); 86 + dma_addr = pfn_to_dma(dev, page_to_pfn(pg)); 87 + dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir); 88 + 89 + /* Final magic words */ 90 + pg = vmalloc_to_page(buf->virt_addr + buf_size); 91 + dma_addr = pfn_to_dma(dev, page_to_pfn(pg)); 92 + offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK; 93 + dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir); 94 + } 95 + 96 + static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, 97 + struct ispstat_buffer *buf, 98 + u32 buf_size, 99 + enum 
dma_data_direction dir) 100 + { 101 + if (IS_COHERENT_BUF(stat)) 102 + return; 103 + 104 + __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, 105 + dma_sync_single_range_for_device); 106 + } 107 + 108 + static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat, 109 + struct ispstat_buffer *buf, 110 + u32 buf_size, 111 + enum dma_data_direction dir) 112 + { 113 + if (IS_COHERENT_BUF(stat)) 114 + return; 115 + 116 + __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, 117 + dma_sync_single_range_for_cpu); 118 + } 119 + 120 + static int isp_stat_buf_check_magic(struct ispstat *stat, 121 + struct ispstat_buffer *buf) 122 + { 123 + const u32 buf_size = IS_H3A_AF(stat) ? 124 + buf->buf_size + AF_EXTRA_DATA : buf->buf_size; 125 + u8 *w; 126 + u8 *end; 127 + int ret = -EINVAL; 128 + 129 + isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); 130 + 131 + /* Checking initial magic numbers. They shouldn't be here anymore. */ 132 + for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++) 133 + if (likely(*w != MAGIC_NUM)) 134 + ret = 0; 135 + 136 + if (ret) { 137 + dev_dbg(stat->isp->dev, "%s: beginning magic check does not " 138 + "match.\n", stat->subdev.name); 139 + return ret; 140 + } 141 + 142 + /* Checking magic numbers at the end. They must be still here. */ 143 + for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE; 144 + w < end; w++) { 145 + if (unlikely(*w != MAGIC_NUM)) { 146 + dev_dbg(stat->isp->dev, "%s: endding magic check does " 147 + "not match.\n", stat->subdev.name); 148 + return -EINVAL; 149 + } 150 + } 151 + 152 + isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, 153 + DMA_FROM_DEVICE); 154 + 155 + return 0; 156 + } 157 + 158 + static void isp_stat_buf_insert_magic(struct ispstat *stat, 159 + struct ispstat_buffer *buf) 160 + { 161 + const u32 buf_size = IS_H3A_AF(stat) ? 
162 + stat->buf_size + AF_EXTRA_DATA : stat->buf_size; 163 + 164 + isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); 165 + 166 + /* 167 + * Inserting MAGIC_NUM at the beginning and end of the buffer. 168 + * buf->buf_size is set only after the buffer is queued. For now the 169 + * right buf_size for the current configuration is pointed by 170 + * stat->buf_size. 171 + */ 172 + memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE); 173 + memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE); 174 + 175 + isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, 176 + DMA_BIDIRECTIONAL); 177 + } 178 + 179 + static void isp_stat_buf_sync_for_device(struct ispstat *stat, 180 + struct ispstat_buffer *buf) 181 + { 182 + if (IS_COHERENT_BUF(stat)) 183 + return; 184 + 185 + dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl, 186 + buf->iovm->sgt->nents, DMA_FROM_DEVICE); 187 + } 188 + 189 + static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, 190 + struct ispstat_buffer *buf) 191 + { 192 + if (IS_COHERENT_BUF(stat)) 193 + return; 194 + 195 + dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl, 196 + buf->iovm->sgt->nents, DMA_FROM_DEVICE); 197 + } 198 + 199 + static void isp_stat_buf_clear(struct ispstat *stat) 200 + { 201 + int i; 202 + 203 + for (i = 0; i < STAT_MAX_BUFS; i++) 204 + stat->buf[i].empty = 1; 205 + } 206 + 207 + static struct ispstat_buffer * 208 + __isp_stat_buf_find(struct ispstat *stat, int look_empty) 209 + { 210 + struct ispstat_buffer *found = NULL; 211 + int i; 212 + 213 + for (i = 0; i < STAT_MAX_BUFS; i++) { 214 + struct ispstat_buffer *curr = &stat->buf[i]; 215 + 216 + /* 217 + * Don't select the buffer which is being copied to 218 + * userspace or used by the module. 
219 + */ 220 + if (curr == stat->locked_buf || curr == stat->active_buf) 221 + continue; 222 + 223 + /* Don't select uninitialised buffers if it's not required */ 224 + if (!look_empty && curr->empty) 225 + continue; 226 + 227 + /* Pick uninitialised buffer over anything else if look_empty */ 228 + if (curr->empty) { 229 + found = curr; 230 + break; 231 + } 232 + 233 + /* Choose the oldest buffer */ 234 + if (!found || 235 + (s32)curr->frame_number - (s32)found->frame_number < 0) 236 + found = curr; 237 + } 238 + 239 + return found; 240 + } 241 + 242 + static inline struct ispstat_buffer * 243 + isp_stat_buf_find_oldest(struct ispstat *stat) 244 + { 245 + return __isp_stat_buf_find(stat, 0); 246 + } 247 + 248 + static inline struct ispstat_buffer * 249 + isp_stat_buf_find_oldest_or_empty(struct ispstat *stat) 250 + { 251 + return __isp_stat_buf_find(stat, 1); 252 + } 253 + 254 + static int isp_stat_buf_queue(struct ispstat *stat) 255 + { 256 + if (!stat->active_buf) 257 + return STAT_NO_BUF; 258 + 259 + do_gettimeofday(&stat->active_buf->ts); 260 + 261 + stat->active_buf->buf_size = stat->buf_size; 262 + if (isp_stat_buf_check_magic(stat, stat->active_buf)) { 263 + dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n", 264 + stat->subdev.name); 265 + return STAT_NO_BUF; 266 + } 267 + stat->active_buf->config_counter = stat->config_counter; 268 + stat->active_buf->frame_number = stat->frame_number; 269 + stat->active_buf->empty = 0; 270 + stat->active_buf = NULL; 271 + 272 + return STAT_BUF_DONE; 273 + } 274 + 275 + /* Get next free buffer to write the statistics to and mark it active. 
*/ 276 + static void isp_stat_buf_next(struct ispstat *stat) 277 + { 278 + if (unlikely(stat->active_buf)) 279 + /* Overwriting unused active buffer */ 280 + dev_dbg(stat->isp->dev, "%s: new buffer requested without " 281 + "queuing active one.\n", 282 + stat->subdev.name); 283 + else 284 + stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat); 285 + } 286 + 287 + static void isp_stat_buf_release(struct ispstat *stat) 288 + { 289 + unsigned long flags; 290 + 291 + isp_stat_buf_sync_for_device(stat, stat->locked_buf); 292 + spin_lock_irqsave(&stat->isp->stat_lock, flags); 293 + stat->locked_buf = NULL; 294 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 295 + } 296 + 297 + /* Get buffer to userspace. */ 298 + static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, 299 + struct omap3isp_stat_data *data) 300 + { 301 + int rval = 0; 302 + unsigned long flags; 303 + struct ispstat_buffer *buf; 304 + 305 + spin_lock_irqsave(&stat->isp->stat_lock, flags); 306 + 307 + while (1) { 308 + buf = isp_stat_buf_find_oldest(stat); 309 + if (!buf) { 310 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 311 + dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n", 312 + stat->subdev.name); 313 + return ERR_PTR(-EBUSY); 314 + } 315 + if (isp_stat_buf_check_magic(stat, buf)) { 316 + dev_dbg(stat->isp->dev, "%s: current buffer has " 317 + "corrupted data\n.", stat->subdev.name); 318 + /* Mark empty because it doesn't have valid data. */ 319 + buf->empty = 1; 320 + } else { 321 + /* Buffer isn't corrupted. 
*/ 322 + break; 323 + } 324 + } 325 + 326 + stat->locked_buf = buf; 327 + 328 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 329 + 330 + if (buf->buf_size > data->buf_size) { 331 + dev_warn(stat->isp->dev, "%s: userspace's buffer size is " 332 + "not enough.\n", stat->subdev.name); 333 + isp_stat_buf_release(stat); 334 + return ERR_PTR(-EINVAL); 335 + } 336 + 337 + isp_stat_buf_sync_for_cpu(stat, buf); 338 + 339 + rval = copy_to_user(data->buf, 340 + buf->virt_addr, 341 + buf->buf_size); 342 + 343 + if (rval) { 344 + dev_info(stat->isp->dev, 345 + "%s: failed copying %d bytes of stat data\n", 346 + stat->subdev.name, rval); 347 + buf = ERR_PTR(-EFAULT); 348 + isp_stat_buf_release(stat); 349 + } 350 + 351 + return buf; 352 + } 353 + 354 + static void isp_stat_bufs_free(struct ispstat *stat) 355 + { 356 + struct isp_device *isp = stat->isp; 357 + int i; 358 + 359 + for (i = 0; i < STAT_MAX_BUFS; i++) { 360 + struct ispstat_buffer *buf = &stat->buf[i]; 361 + 362 + if (!IS_COHERENT_BUF(stat)) { 363 + if (IS_ERR_OR_NULL((void *)buf->iommu_addr)) 364 + continue; 365 + if (buf->iovm) 366 + dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, 367 + buf->iovm->sgt->nents, 368 + DMA_FROM_DEVICE); 369 + iommu_vfree(isp->iommu, buf->iommu_addr); 370 + } else { 371 + if (!buf->virt_addr) 372 + continue; 373 + dma_free_coherent(stat->isp->dev, stat->buf_alloc_size, 374 + buf->virt_addr, buf->dma_addr); 375 + } 376 + buf->iommu_addr = 0; 377 + buf->iovm = NULL; 378 + buf->dma_addr = 0; 379 + buf->virt_addr = NULL; 380 + buf->empty = 1; 381 + } 382 + 383 + dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n", 384 + stat->subdev.name); 385 + 386 + stat->buf_alloc_size = 0; 387 + stat->active_buf = NULL; 388 + } 389 + 390 + static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) 391 + { 392 + struct isp_device *isp = stat->isp; 393 + int i; 394 + 395 + stat->buf_alloc_size = size; 396 + 397 + for (i = 0; i < STAT_MAX_BUFS; i++) { 398 + struct ispstat_buffer 
*buf = &stat->buf[i]; 399 + struct iovm_struct *iovm; 400 + 401 + WARN_ON(buf->dma_addr); 402 + buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size, 403 + IOMMU_FLAG); 404 + if (IS_ERR((void *)buf->iommu_addr)) { 405 + dev_err(stat->isp->dev, 406 + "%s: Can't acquire memory for " 407 + "buffer %d\n", stat->subdev.name, i); 408 + isp_stat_bufs_free(stat); 409 + return -ENOMEM; 410 + } 411 + 412 + iovm = find_iovm_area(isp->iommu, buf->iommu_addr); 413 + if (!iovm || 414 + !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, 415 + DMA_FROM_DEVICE)) { 416 + isp_stat_bufs_free(stat); 417 + return -ENOMEM; 418 + } 419 + buf->iovm = iovm; 420 + 421 + buf->virt_addr = da_to_va(stat->isp->iommu, 422 + (u32)buf->iommu_addr); 423 + buf->empty = 1; 424 + dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." 425 + "iommu_addr=0x%08lx virt_addr=0x%08lx", 426 + stat->subdev.name, i, buf->iommu_addr, 427 + (unsigned long)buf->virt_addr); 428 + } 429 + 430 + return 0; 431 + } 432 + 433 + static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size) 434 + { 435 + int i; 436 + 437 + stat->buf_alloc_size = size; 438 + 439 + for (i = 0; i < STAT_MAX_BUFS; i++) { 440 + struct ispstat_buffer *buf = &stat->buf[i]; 441 + 442 + WARN_ON(buf->iommu_addr); 443 + buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size, 444 + &buf->dma_addr, GFP_KERNEL | GFP_DMA); 445 + 446 + if (!buf->virt_addr || !buf->dma_addr) { 447 + dev_info(stat->isp->dev, 448 + "%s: Can't acquire memory for " 449 + "DMA buffer %d\n", stat->subdev.name, i); 450 + isp_stat_bufs_free(stat); 451 + return -ENOMEM; 452 + } 453 + buf->empty = 1; 454 + 455 + dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." 
456 + "dma_addr=0x%08lx virt_addr=0x%08lx\n", 457 + stat->subdev.name, i, (unsigned long)buf->dma_addr, 458 + (unsigned long)buf->virt_addr); 459 + } 460 + 461 + return 0; 462 + } 463 + 464 + static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) 465 + { 466 + unsigned long flags; 467 + 468 + spin_lock_irqsave(&stat->isp->stat_lock, flags); 469 + 470 + BUG_ON(stat->locked_buf != NULL); 471 + 472 + /* Are the old buffers big enough? */ 473 + if (stat->buf_alloc_size >= size) { 474 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 475 + return 0; 476 + } 477 + 478 + if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) { 479 + dev_info(stat->isp->dev, 480 + "%s: trying to allocate memory when busy\n", 481 + stat->subdev.name); 482 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 483 + return -EBUSY; 484 + } 485 + 486 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 487 + 488 + isp_stat_bufs_free(stat); 489 + 490 + if (IS_COHERENT_BUF(stat)) 491 + return isp_stat_bufs_alloc_dma(stat, size); 492 + else 493 + return isp_stat_bufs_alloc_iommu(stat, size); 494 + } 495 + 496 + static void isp_stat_queue_event(struct ispstat *stat, int err) 497 + { 498 + struct video_device *vdev = &stat->subdev.devnode; 499 + struct v4l2_event event; 500 + struct omap3isp_stat_event_status *status = (void *)event.u.data; 501 + 502 + memset(&event, 0, sizeof(event)); 503 + if (!err) { 504 + status->frame_number = stat->frame_number; 505 + status->config_counter = stat->config_counter; 506 + } else { 507 + status->buf_err = 1; 508 + } 509 + event.type = stat->event_type; 510 + v4l2_event_queue(vdev, &event); 511 + } 512 + 513 + 514 + /* 515 + * omap3isp_stat_request_statistics - Request statistics. 516 + * @data: Pointer to return statistics data. 517 + * 518 + * Returns 0 if successful. 
519 + */ 520 + int omap3isp_stat_request_statistics(struct ispstat *stat, 521 + struct omap3isp_stat_data *data) 522 + { 523 + struct ispstat_buffer *buf; 524 + 525 + if (stat->state != ISPSTAT_ENABLED) { 526 + dev_dbg(stat->isp->dev, "%s: engine not enabled.\n", 527 + stat->subdev.name); 528 + return -EINVAL; 529 + } 530 + 531 + mutex_lock(&stat->ioctl_lock); 532 + buf = isp_stat_buf_get(stat, data); 533 + if (IS_ERR(buf)) { 534 + mutex_unlock(&stat->ioctl_lock); 535 + return PTR_ERR(buf); 536 + } 537 + 538 + data->ts = buf->ts; 539 + data->config_counter = buf->config_counter; 540 + data->frame_number = buf->frame_number; 541 + data->buf_size = buf->buf_size; 542 + 543 + buf->empty = 1; 544 + isp_stat_buf_release(stat); 545 + mutex_unlock(&stat->ioctl_lock); 546 + 547 + return 0; 548 + } 549 + 550 + /* 551 + * omap3isp_stat_config - Receives new statistic engine configuration. 552 + * @new_conf: Pointer to config structure. 553 + * 554 + * Returns 0 if successful, -EINVAL if new_conf pointer is NULL, -ENOMEM if 555 + * was unable to allocate memory for the buffer, or other errors if parameters 556 + * are invalid. 
557 + */ 558 + int omap3isp_stat_config(struct ispstat *stat, void *new_conf) 559 + { 560 + int ret; 561 + unsigned long irqflags; 562 + struct ispstat_generic_config *user_cfg = new_conf; 563 + u32 buf_size = user_cfg->buf_size; 564 + 565 + if (!new_conf) { 566 + dev_dbg(stat->isp->dev, "%s: configuration is NULL\n", 567 + stat->subdev.name); 568 + return -EINVAL; 569 + } 570 + 571 + mutex_lock(&stat->ioctl_lock); 572 + 573 + dev_dbg(stat->isp->dev, "%s: configuring module with buffer " 574 + "size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size); 575 + 576 + ret = stat->ops->validate_params(stat, new_conf); 577 + if (ret) { 578 + mutex_unlock(&stat->ioctl_lock); 579 + dev_dbg(stat->isp->dev, "%s: configuration values are " 580 + "invalid.\n", stat->subdev.name); 581 + return ret; 582 + } 583 + 584 + if (buf_size != user_cfg->buf_size) 585 + dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size " 586 + "request to 0x%08lx\n", stat->subdev.name, 587 + (unsigned long)user_cfg->buf_size); 588 + 589 + /* 590 + * Hack: H3A modules may need a doubled buffer size to avoid access 591 + * to a invalid memory address after a SBL overflow. 592 + * The buffer size is always PAGE_ALIGNED. 593 + * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be 594 + * inserted at the end to data integrity check purpose. 595 + * Hack 3: AF module writes one paxel data more than it should, so 596 + * the buffer allocation must consider it to avoid invalid memory 597 + * access. 598 + * Hack 4: H3A need to allocate extra space for the recover state. 599 + */ 600 + if (IS_H3A(stat)) { 601 + buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE; 602 + if (IS_H3A_AF(stat)) 603 + /* 604 + * Adding one extra paxel data size for each recover 605 + * buffer + 2 regular ones. 
606 + */ 607 + buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2); 608 + if (stat->recover_priv) { 609 + struct ispstat_generic_config *recover_cfg = 610 + stat->recover_priv; 611 + buf_size += recover_cfg->buf_size * 612 + NUM_H3A_RECOVER_BUFS; 613 + } 614 + buf_size = PAGE_ALIGN(buf_size); 615 + } else { /* Histogram */ 616 + buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE); 617 + } 618 + 619 + ret = isp_stat_bufs_alloc(stat, buf_size); 620 + if (ret) { 621 + mutex_unlock(&stat->ioctl_lock); 622 + return ret; 623 + } 624 + 625 + spin_lock_irqsave(&stat->isp->stat_lock, irqflags); 626 + stat->ops->set_params(stat, new_conf); 627 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 628 + 629 + /* 630 + * Returning the right future config_counter for this setup, so 631 + * userspace can *know* when it has been applied. 632 + */ 633 + user_cfg->config_counter = stat->config_counter + stat->inc_config; 634 + 635 + /* Module has a valid configuration. */ 636 + stat->configured = 1; 637 + dev_dbg(stat->isp->dev, "%s: module has been successfully " 638 + "configured.\n", stat->subdev.name); 639 + 640 + mutex_unlock(&stat->ioctl_lock); 641 + 642 + return 0; 643 + } 644 + 645 + /* 646 + * isp_stat_buf_process - Process statistic buffers. 647 + * @buf_state: points out if buffer is ready to be processed. It's necessary 648 + * because histogram needs to copy the data from internal memory 649 + * before be able to process the buffer. 
650 + */ 651 + static int isp_stat_buf_process(struct ispstat *stat, int buf_state) 652 + { 653 + int ret = STAT_NO_BUF; 654 + 655 + if (!atomic_add_unless(&stat->buf_err, -1, 0) && 656 + buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) { 657 + ret = isp_stat_buf_queue(stat); 658 + isp_stat_buf_next(stat); 659 + } 660 + 661 + return ret; 662 + } 663 + 664 + int omap3isp_stat_pcr_busy(struct ispstat *stat) 665 + { 666 + return stat->ops->busy(stat); 667 + } 668 + 669 + int omap3isp_stat_busy(struct ispstat *stat) 670 + { 671 + return omap3isp_stat_pcr_busy(stat) | stat->buf_processing | 672 + (stat->state != ISPSTAT_DISABLED); 673 + } 674 + 675 + /* 676 + * isp_stat_pcr_enable - Disables/Enables statistic engines. 677 + * @pcr_enable: 0/1 - Disables/Enables the engine. 678 + * 679 + * Must be called from ISP driver when the module is idle and synchronized 680 + * with CCDC. 681 + */ 682 + static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable) 683 + { 684 + if ((stat->state != ISPSTAT_ENABLING && 685 + stat->state != ISPSTAT_ENABLED) && pcr_enable) 686 + /* Userspace has disabled the module. Aborting. 
*/ 687 + return; 688 + 689 + stat->ops->enable(stat, pcr_enable); 690 + if (stat->state == ISPSTAT_DISABLING && !pcr_enable) 691 + stat->state = ISPSTAT_DISABLED; 692 + else if (stat->state == ISPSTAT_ENABLING && pcr_enable) 693 + stat->state = ISPSTAT_ENABLED; 694 + } 695 + 696 + void omap3isp_stat_suspend(struct ispstat *stat) 697 + { 698 + unsigned long flags; 699 + 700 + spin_lock_irqsave(&stat->isp->stat_lock, flags); 701 + 702 + if (stat->state != ISPSTAT_DISABLED) 703 + stat->ops->enable(stat, 0); 704 + if (stat->state == ISPSTAT_ENABLED) 705 + stat->state = ISPSTAT_SUSPENDED; 706 + 707 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 708 + } 709 + 710 + void omap3isp_stat_resume(struct ispstat *stat) 711 + { 712 + /* Module will be re-enabled with its pipeline */ 713 + if (stat->state == ISPSTAT_SUSPENDED) 714 + stat->state = ISPSTAT_ENABLING; 715 + } 716 + 717 + static void isp_stat_try_enable(struct ispstat *stat) 718 + { 719 + unsigned long irqflags; 720 + 721 + if (stat->priv == NULL) 722 + /* driver wasn't initialised */ 723 + return; 724 + 725 + spin_lock_irqsave(&stat->isp->stat_lock, irqflags); 726 + if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing && 727 + stat->buf_alloc_size) { 728 + /* 729 + * Userspace's requested to enable the engine but it wasn't yet. 730 + * Let's do that now. 731 + */ 732 + stat->update = 1; 733 + isp_stat_buf_next(stat); 734 + stat->ops->setup_regs(stat, stat->priv); 735 + isp_stat_buf_insert_magic(stat, stat->active_buf); 736 + 737 + /* 738 + * H3A module has some hw issues which forces the driver to 739 + * ignore next buffers even if it was disabled in the meantime. 740 + * On the other hand, Histogram shouldn't ignore buffers anymore 741 + * if it's being enabled. 
742 + */ 743 + if (!IS_H3A(stat)) 744 + atomic_set(&stat->buf_err, 0); 745 + 746 + isp_stat_pcr_enable(stat, 1); 747 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 748 + dev_dbg(stat->isp->dev, "%s: module is enabled.\n", 749 + stat->subdev.name); 750 + } else { 751 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 752 + } 753 + } 754 + 755 + void omap3isp_stat_isr_frame_sync(struct ispstat *stat) 756 + { 757 + isp_stat_try_enable(stat); 758 + } 759 + 760 + void omap3isp_stat_sbl_overflow(struct ispstat *stat) 761 + { 762 + unsigned long irqflags; 763 + 764 + spin_lock_irqsave(&stat->isp->stat_lock, irqflags); 765 + /* 766 + * Due to a H3A hw issue which prevents the next buffer to start from 767 + * the correct memory address, 2 buffers must be ignored. 768 + */ 769 + atomic_set(&stat->buf_err, 2); 770 + 771 + /* 772 + * If more than one SBL overflow happen in a row, H3A module may access 773 + * invalid memory region. 774 + * stat->sbl_ovl_recover is set to tell to the driver to temporarily use 775 + * a soft configuration which helps to avoid consecutive overflows. 776 + */ 777 + if (stat->recover_priv) 778 + stat->sbl_ovl_recover = 1; 779 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 780 + } 781 + 782 + /* 783 + * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible 784 + * @enable: 0/1 - Disables/Enables the engine. 785 + * 786 + * Client should configure all the module registers before this. 787 + * This function can be called from a userspace request. 788 + */ 789 + int omap3isp_stat_enable(struct ispstat *stat, u8 enable) 790 + { 791 + unsigned long irqflags; 792 + 793 + dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n", 794 + stat->subdev.name, enable ? 
"enable" : "disable"); 795 + 796 + /* Prevent enabling while configuring */ 797 + mutex_lock(&stat->ioctl_lock); 798 + 799 + spin_lock_irqsave(&stat->isp->stat_lock, irqflags); 800 + 801 + if (!stat->configured && enable) { 802 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 803 + mutex_unlock(&stat->ioctl_lock); 804 + dev_dbg(stat->isp->dev, "%s: cannot enable module as it's " 805 + "never been successfully configured so far.\n", 806 + stat->subdev.name); 807 + return -EINVAL; 808 + } 809 + 810 + if (enable) { 811 + if (stat->state == ISPSTAT_DISABLING) 812 + /* Previous disabling request wasn't done yet */ 813 + stat->state = ISPSTAT_ENABLED; 814 + else if (stat->state == ISPSTAT_DISABLED) 815 + /* Module is now being enabled */ 816 + stat->state = ISPSTAT_ENABLING; 817 + } else { 818 + if (stat->state == ISPSTAT_ENABLING) { 819 + /* Previous enabling request wasn't done yet */ 820 + stat->state = ISPSTAT_DISABLED; 821 + } else if (stat->state == ISPSTAT_ENABLED) { 822 + /* Module is now being disabled */ 823 + stat->state = ISPSTAT_DISABLING; 824 + isp_stat_buf_clear(stat); 825 + } 826 + } 827 + 828 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 829 + mutex_unlock(&stat->ioctl_lock); 830 + 831 + return 0; 832 + } 833 + 834 + int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable) 835 + { 836 + struct ispstat *stat = v4l2_get_subdevdata(subdev); 837 + 838 + if (enable) { 839 + /* 840 + * Only set enable PCR bit if the module was previously 841 + * enabled through ioct. 842 + */ 843 + isp_stat_try_enable(stat); 844 + } else { 845 + unsigned long flags; 846 + /* Disable PCR bit and config enable field */ 847 + omap3isp_stat_enable(stat, 0); 848 + spin_lock_irqsave(&stat->isp->stat_lock, flags); 849 + stat->ops->enable(stat, 0); 850 + spin_unlock_irqrestore(&stat->isp->stat_lock, flags); 851 + 852 + /* 853 + * If module isn't busy, a new interrupt may come or not to 854 + * set the state to DISABLED. 
As Histogram needs to read its 855 + * internal memory to clear it, let interrupt handler 856 + * responsible of changing state to DISABLED. If the last 857 + * interrupt is coming, it's still safe as the handler will 858 + * ignore the second time when state is already set to DISABLED. 859 + * It's necessary to synchronize Histogram with streamoff, once 860 + * the module may be considered idle before last SDMA transfer 861 + * starts if we return here. 862 + */ 863 + if (!omap3isp_stat_pcr_busy(stat)) 864 + omap3isp_stat_isr(stat); 865 + 866 + dev_dbg(stat->isp->dev, "%s: module is being disabled\n", 867 + stat->subdev.name); 868 + } 869 + 870 + return 0; 871 + } 872 + 873 + /* 874 + * __stat_isr - Interrupt handler for statistic drivers 875 + */ 876 + static void __stat_isr(struct ispstat *stat, int from_dma) 877 + { 878 + int ret = STAT_BUF_DONE; 879 + int buf_processing; 880 + unsigned long irqflags; 881 + struct isp_pipeline *pipe; 882 + 883 + /* 884 + * stat->buf_processing must be set before disable module. It's 885 + * necessary to not inform too early the buffers aren't busy in case 886 + * of SDMA is going to be used. 887 + */ 888 + spin_lock_irqsave(&stat->isp->stat_lock, irqflags); 889 + if (stat->state == ISPSTAT_DISABLED) { 890 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 891 + return; 892 + } 893 + buf_processing = stat->buf_processing; 894 + stat->buf_processing = 1; 895 + stat->ops->enable(stat, 0); 896 + 897 + if (buf_processing && !from_dma) { 898 + if (stat->state == ISPSTAT_ENABLED) { 899 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 900 + dev_err(stat->isp->dev, 901 + "%s: interrupt occurred when module was still " 902 + "processing a buffer.\n", stat->subdev.name); 903 + ret = STAT_NO_BUF; 904 + goto out; 905 + } else { 906 + /* 907 + * Interrupt handler was called from streamoff when 908 + * the module wasn't busy anymore to ensure it is being 909 + * disabled after process last buffer. 
If such buffer 910 + * processing has already started, no need to do 911 + * anything else. 912 + */ 913 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 914 + return; 915 + } 916 + } 917 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 918 + 919 + /* If it's busy we can't process this buffer anymore */ 920 + if (!omap3isp_stat_pcr_busy(stat)) { 921 + if (!from_dma && stat->ops->buf_process) 922 + /* Module still need to copy data to buffer. */ 923 + ret = stat->ops->buf_process(stat); 924 + if (ret == STAT_BUF_WAITING_DMA) 925 + /* Buffer is not ready yet */ 926 + return; 927 + 928 + spin_lock_irqsave(&stat->isp->stat_lock, irqflags); 929 + 930 + /* 931 + * Histogram needs to read its internal memory to clear it 932 + * before be disabled. For that reason, common statistic layer 933 + * can return only after call stat's buf_process() operator. 934 + */ 935 + if (stat->state == ISPSTAT_DISABLING) { 936 + stat->state = ISPSTAT_DISABLED; 937 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 938 + stat->buf_processing = 0; 939 + return; 940 + } 941 + pipe = to_isp_pipeline(&stat->subdev.entity); 942 + stat->frame_number = atomic_read(&pipe->frame_number); 943 + 944 + /* 945 + * Before this point, 'ret' stores the buffer's status if it's 946 + * ready to be processed. Afterwards, it holds the status if 947 + * it was processed successfully. 948 + */ 949 + ret = isp_stat_buf_process(stat, ret); 950 + 951 + if (likely(!stat->sbl_ovl_recover)) { 952 + stat->ops->setup_regs(stat, stat->priv); 953 + } else { 954 + /* 955 + * Using recover config to increase the chance to have 956 + * a good buffer processing and make the H3A module to 957 + * go back to a valid state. 958 + */ 959 + stat->update = 1; 960 + stat->ops->setup_regs(stat, stat->recover_priv); 961 + stat->sbl_ovl_recover = 0; 962 + 963 + /* 964 + * Set 'update' in case of the module needs to use 965 + * regular configuration after next buffer. 
966 + */ 967 + stat->update = 1; 968 + } 969 + 970 + isp_stat_buf_insert_magic(stat, stat->active_buf); 971 + 972 + /* 973 + * Hack: H3A modules may access invalid memory address or send 974 + * corrupted data to userspace if more than 1 SBL overflow 975 + * happens in a row without re-writing its buffer's start memory 976 + * address in the meantime. Such situation is avoided if the 977 + * module is not immediately re-enabled when the ISR misses the 978 + * timing to process the buffer and to setup the registers. 979 + * Because of that, pcr_enable(1) was moved to inside this 'if' 980 + * block. But the next interruption will still happen as during 981 + * pcr_enable(0) the module was busy. 982 + */ 983 + isp_stat_pcr_enable(stat, 1); 984 + spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); 985 + } else { 986 + /* 987 + * If a SBL overflow occurs and the H3A driver misses the timing 988 + * to process the buffer, stat->buf_err is set and won't be 989 + * cleared now. So the next buffer will be correctly ignored. 990 + * It's necessary due to a hw issue which makes the next H3A 991 + * buffer to start from the memory address where the previous 992 + * one stopped, instead of start where it was configured to. 993 + * Do not "stat->buf_err = 0" here. 994 + */ 995 + 996 + if (stat->ops->buf_process) 997 + /* 998 + * Driver may need to erase current data prior to 999 + * process a new buffer. If it misses the timing, the 1000 + * next buffer might be wrong. So should be ignored. 1001 + * It happens only for Histogram. 
1002 + */ 1003 + atomic_set(&stat->buf_err, 1); 1004 + 1005 + ret = STAT_NO_BUF; 1006 + dev_dbg(stat->isp->dev, "%s: cannot process buffer, " 1007 + "device is busy.\n", stat->subdev.name); 1008 + } 1009 + 1010 + out: 1011 + stat->buf_processing = 0; 1012 + isp_stat_queue_event(stat, ret != STAT_BUF_DONE); 1013 + } 1014 + 1015 + void omap3isp_stat_isr(struct ispstat *stat) 1016 + { 1017 + __stat_isr(stat, 0); 1018 + } 1019 + 1020 + void omap3isp_stat_dma_isr(struct ispstat *stat) 1021 + { 1022 + __stat_isr(stat, 1); 1023 + } 1024 + 1025 + static int isp_stat_init_entities(struct ispstat *stat, const char *name, 1026 + const struct v4l2_subdev_ops *sd_ops) 1027 + { 1028 + struct v4l2_subdev *subdev = &stat->subdev; 1029 + struct media_entity *me = &subdev->entity; 1030 + 1031 + v4l2_subdev_init(subdev, sd_ops); 1032 + snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name); 1033 + subdev->grp_id = 1 << 16; /* group ID for isp subdevs */ 1034 + subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; 1035 + subdev->nevents = STAT_NEVENTS; 1036 + v4l2_set_subdevdata(subdev, stat); 1037 + 1038 + stat->pad.flags = MEDIA_PAD_FL_SINK; 1039 + me->ops = NULL; 1040 + 1041 + return media_entity_init(me, 1, &stat->pad, 0); 1042 + } 1043 + 1044 + int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev, 1045 + struct v4l2_fh *fh, 1046 + struct v4l2_event_subscription *sub) 1047 + { 1048 + struct ispstat *stat = v4l2_get_subdevdata(subdev); 1049 + 1050 + if (sub->type != stat->event_type) 1051 + return -EINVAL; 1052 + 1053 + return v4l2_event_subscribe(fh, sub); 1054 + } 1055 + 1056 + int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev, 1057 + struct v4l2_fh *fh, 1058 + struct v4l2_event_subscription *sub) 1059 + { 1060 + return v4l2_event_unsubscribe(fh, sub); 1061 + } 1062 + 1063 + void omap3isp_stat_unregister_entities(struct ispstat *stat) 1064 + { 1065 + media_entity_cleanup(&stat->subdev.entity); 1066 + 
v4l2_device_unregister_subdev(&stat->subdev); 1067 + } 1068 + 1069 + int omap3isp_stat_register_entities(struct ispstat *stat, 1070 + struct v4l2_device *vdev) 1071 + { 1072 + return v4l2_device_register_subdev(vdev, &stat->subdev); 1073 + } 1074 + 1075 + int omap3isp_stat_init(struct ispstat *stat, const char *name, 1076 + const struct v4l2_subdev_ops *sd_ops) 1077 + { 1078 + stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL); 1079 + if (!stat->buf) 1080 + return -ENOMEM; 1081 + isp_stat_buf_clear(stat); 1082 + mutex_init(&stat->ioctl_lock); 1083 + atomic_set(&stat->buf_err, 0); 1084 + 1085 + return isp_stat_init_entities(stat, name, sd_ops); 1086 + } 1087 + 1088 + void omap3isp_stat_free(struct ispstat *stat) 1089 + { 1090 + isp_stat_bufs_free(stat); 1091 + kfree(stat->buf); 1092 + }
+169
drivers/media/video/omap3isp/ispstat.h
··· 1 + /* 2 + * ispstat.h 3 + * 4 + * TI OMAP3 ISP - Statistics core 5 + * 6 + * Copyright (C) 2010 Nokia Corporation 7 + * Copyright (C) 2009 Texas Instruments, Inc 8 + * 9 + * Contacts: David Cohen <dacohen@gmail.com> 10 + * Laurent Pinchart <laurent.pinchart@ideasonboard.com> 11 + * Sakari Ailus <sakari.ailus@iki.fi> 12 + * 13 + * This program is free software; you can redistribute it and/or modify 14 + * it under the terms of the GNU General Public License version 2 as 15 + * published by the Free Software Foundation. 16 + * 17 + * This program is distributed in the hope that it will be useful, but 18 + * WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 + * General Public License for more details. 21 + * 22 + * You should have received a copy of the GNU General Public License 23 + * along with this program; if not, write to the Free Software 24 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 25 + * 02110-1301 USA 26 + */ 27 + 28 + #ifndef OMAP3_ISP_STAT_H 29 + #define OMAP3_ISP_STAT_H 30 + 31 + #include <linux/types.h> 32 + #include <linux/omap3isp.h> 33 + #include <plat/dma.h> 34 + #include <media/v4l2-event.h> 35 + 36 + #include "isp.h" 37 + #include "ispvideo.h" 38 + 39 + #define STAT_MAX_BUFS 5 40 + #define STAT_NEVENTS 8 41 + 42 + #define STAT_BUF_DONE 0 /* Buffer is ready */ 43 + #define STAT_NO_BUF 1 /* An error has occurred */ 44 + #define STAT_BUF_WAITING_DMA 2 /* Histogram only: DMA is running */ 45 + 46 + struct ispstat; 47 + 48 + struct ispstat_buffer { 49 + unsigned long iommu_addr; 50 + struct iovm_struct *iovm; 51 + void *virt_addr; 52 + dma_addr_t dma_addr; 53 + struct timeval ts; 54 + u32 buf_size; 55 + u32 frame_number; 56 + u16 config_counter; 57 + u8 empty; 58 + }; 59 + 60 + struct ispstat_ops { 61 + /* 62 + * Validate new params configuration. 
63 + * new_conf->buf_size value must be changed to the exact buffer size 64 + * necessary for the new configuration if it's smaller. 65 + */ 66 + int (*validate_params)(struct ispstat *stat, void *new_conf); 67 + 68 + /* 69 + * Save new params configuration. 70 + * stat->priv->buf_size value must be set to the exact buffer size for 71 + * the new configuration. 72 + * stat->update is set to 1 if new configuration is different than 73 + * current one. 74 + */ 75 + void (*set_params)(struct ispstat *stat, void *new_conf); 76 + 77 + /* Apply stored configuration. */ 78 + void (*setup_regs)(struct ispstat *stat, void *priv); 79 + 80 + /* Enable/Disable module. */ 81 + void (*enable)(struct ispstat *stat, int enable); 82 + 83 + /* Verify is module is busy. */ 84 + int (*busy)(struct ispstat *stat); 85 + 86 + /* Used for specific operations during generic buf process task. */ 87 + int (*buf_process)(struct ispstat *stat); 88 + }; 89 + 90 + enum ispstat_state_t { 91 + ISPSTAT_DISABLED = 0, 92 + ISPSTAT_DISABLING, 93 + ISPSTAT_ENABLED, 94 + ISPSTAT_ENABLING, 95 + ISPSTAT_SUSPENDED, 96 + }; 97 + 98 + struct ispstat { 99 + struct v4l2_subdev subdev; 100 + struct media_pad pad; /* sink pad */ 101 + 102 + /* Control */ 103 + unsigned configured:1; 104 + unsigned update:1; 105 + unsigned buf_processing:1; 106 + unsigned sbl_ovl_recover:1; 107 + u8 inc_config; 108 + atomic_t buf_err; 109 + enum ispstat_state_t state; /* enabling/disabling state */ 110 + struct omap_dma_channel_params dma_config; 111 + struct isp_device *isp; 112 + void *priv; /* pointer to priv config struct */ 113 + void *recover_priv; /* pointer to recover priv configuration */ 114 + struct mutex ioctl_lock; /* serialize private ioctl */ 115 + 116 + const struct ispstat_ops *ops; 117 + 118 + /* Buffer */ 119 + u8 wait_acc_frames; 120 + u16 config_counter; 121 + u32 frame_number; 122 + u32 buf_size; 123 + u32 buf_alloc_size; 124 + int dma_ch; 125 + unsigned long event_type; 126 + struct ispstat_buffer *buf; 127 
+ struct ispstat_buffer *active_buf; 128 + struct ispstat_buffer *locked_buf; 129 + }; 130 + 131 + struct ispstat_generic_config { 132 + /* 133 + * Fields must be in the same order as in: 134 + * - isph3a_aewb_config 135 + * - isph3a_af_config 136 + * - isphist_config 137 + */ 138 + u32 buf_size; 139 + u16 config_counter; 140 + }; 141 + 142 + int omap3isp_stat_config(struct ispstat *stat, void *new_conf); 143 + int omap3isp_stat_request_statistics(struct ispstat *stat, 144 + struct omap3isp_stat_data *data); 145 + int omap3isp_stat_init(struct ispstat *stat, const char *name, 146 + const struct v4l2_subdev_ops *sd_ops); 147 + void omap3isp_stat_free(struct ispstat *stat); 148 + int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev, 149 + struct v4l2_fh *fh, 150 + struct v4l2_event_subscription *sub); 151 + int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev, 152 + struct v4l2_fh *fh, 153 + struct v4l2_event_subscription *sub); 154 + int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable); 155 + 156 + int omap3isp_stat_busy(struct ispstat *stat); 157 + int omap3isp_stat_pcr_busy(struct ispstat *stat); 158 + void omap3isp_stat_suspend(struct ispstat *stat); 159 + void omap3isp_stat_resume(struct ispstat *stat); 160 + int omap3isp_stat_enable(struct ispstat *stat, u8 enable); 161 + void omap3isp_stat_sbl_overflow(struct ispstat *stat); 162 + void omap3isp_stat_isr(struct ispstat *stat); 163 + void omap3isp_stat_isr_frame_sync(struct ispstat *stat); 164 + void omap3isp_stat_dma_isr(struct ispstat *stat); 165 + int omap3isp_stat_register_entities(struct ispstat *stat, 166 + struct v4l2_device *vdev); 167 + void omap3isp_stat_unregister_entities(struct ispstat *stat); 168 + 169 + #endif /* OMAP3_ISP_STAT_H */