Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

misc: xilinx_sdfec: Support poll file operation

Support monitoring and detecting the SD-FEC error events
through IRQ and poll file operation.

The SD-FEC device can detect one-error or multi-error events.
An error triggers an interrupt which creates and runs the ONE_SHOT
IRQ thread.
The ONE_SHOT IRQ thread detects the type of error and passes that
information to the poll function.
The file_operation callback poll(), collects the events and
updates the statistics accordingly.
The poll() function blocks on a wait queue which can be
unblocked by the ONE_SHOT IRQ handling thread.

Support SD-FEC interrupt set ioctl callback.
The SD-FEC can detect two types of errors: coding errors (ECC) and
data interface errors (TLAST).
The errors are events which can trigger an IRQ if enabled.
The driver can monitor and detect these errors through IRQ.
Also the driver updates the statistical data.

Tested-by: Dragan Cvetic <dragan.cvetic@xilinx.com>
Signed-off-by: Derek Kiernan <derek.kiernan@xilinx.com>
Signed-off-by: Dragan Cvetic <dragan.cvetic@xilinx.com>
Link: https://lore.kernel.org/r/1564216438-322406-6-git-send-email-dragan.cvetic@xilinx.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Dragan Cvetic and committed by
Greg Kroah-Hartman
cc538f60 77dd39d9

+249 -4
+236 -4
drivers/misc/xilinx_sdfec.c
··· 191 191 * struct xsdfec_dev - Driver data for SDFEC 192 192 * @miscdev: Misc device handle 193 193 * @clks: Clocks managed by the SDFEC driver 194 - * @regs: device physical base address 195 - * @dev: pointer to device struct 194 + * @waitq: Driver wait queue 196 195 * @config: Configuration of the SDFEC device 197 196 * @dev_name: Device name 197 + * @flags: spinlock flags 198 + * @regs: device physical base address 199 + * @dev: pointer to device struct 198 200 * @state: State of the SDFEC device 199 201 * @error_data_lock: Error counter and states spinlock 200 202 * @dev_id: Device ID 203 + * @isr_err_count: Count of ISR errors 204 + * @cecc_count: Count of Correctable ECC errors (SBE) 205 + * @uecc_count: Count of Uncorrectable ECC errors (MBE) 206 + * @irq: IRQ number 207 + * @state_updated: indicates State updated by interrupt handler 208 + * @stats_updated: indicates Stats updated by interrupt handler 201 209 * 202 210 * This structure contains necessary state for SDFEC driver to operate 203 211 */ 204 212 struct xsdfec_dev { 205 213 struct miscdevice miscdev; 206 214 struct xsdfec_clks clks; 207 - void __iomem *regs; 208 - struct device *dev; 215 + wait_queue_head_t waitq; 209 216 struct xsdfec_config config; 210 217 char dev_name[DEV_NAME_LEN]; 218 + unsigned long flags; 219 + void __iomem *regs; 220 + struct device *dev; 211 221 enum xsdfec_state state; 212 222 /* Spinlock to protect state_updated and stats_updated */ 213 223 spinlock_t error_data_lock; 214 224 int dev_id; 225 + u32 isr_err_count; 226 + u32 cecc_count; 227 + u32 uecc_count; 228 + int irq; 229 + bool state_updated; 230 + bool stats_updated; 215 231 }; 216 232 217 233 static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, ··· 296 280 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config)); 297 281 if (err) 298 282 err = -EFAULT; 283 + 284 + return err; 285 + } 286 + 287 + static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable) 288 + { 289 + u32 
mask_read; 290 + 291 + if (enable) { 292 + /* Enable */ 293 + xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK); 294 + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); 295 + if (mask_read & XSDFEC_ISR_MASK) { 296 + dev_dbg(xsdfec->dev, 297 + "SDFEC enabling irq with IER failed"); 298 + return -EIO; 299 + } 300 + } else { 301 + /* Disable */ 302 + xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK); 303 + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); 304 + if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) { 305 + dev_dbg(xsdfec->dev, 306 + "SDFEC disabling irq with IDR failed"); 307 + return -EIO; 308 + } 309 + } 310 + return 0; 311 + } 312 + 313 + static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable) 314 + { 315 + u32 mask_read; 316 + 317 + if (enable) { 318 + /* Enable */ 319 + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR, 320 + XSDFEC_ALL_ECC_ISR_MASK); 321 + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); 322 + if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) { 323 + dev_dbg(xsdfec->dev, 324 + "SDFEC enabling ECC irq with ECC IER failed"); 325 + return -EIO; 326 + } 327 + } else { 328 + /* Disable */ 329 + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR, 330 + XSDFEC_ALL_ECC_ISR_MASK); 331 + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); 332 + if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == 333 + XSDFEC_ECC_ISR_MASK) || 334 + ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == 335 + XSDFEC_PL_INIT_ECC_ISR_MASK))) { 336 + dev_dbg(xsdfec->dev, 337 + "SDFEC disable ECC irq with ECC IDR failed"); 338 + return -EIO; 339 + } 340 + } 341 + return 0; 342 + } 343 + 344 + static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg) 345 + { 346 + struct xsdfec_irq irq; 347 + int err; 348 + int isr_err; 349 + int ecc_err; 350 + 351 + err = copy_from_user(&irq, arg, sizeof(irq)); 352 + if (err) 353 + return -EFAULT; 354 + 355 + /* Setup tlast related IRQ */ 356 + isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr); 357 + 
if (!isr_err) 358 + xsdfec->config.irq.enable_isr = irq.enable_isr; 359 + 360 + /* Setup ECC related IRQ */ 361 + ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr); 362 + if (!ecc_err) 363 + xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr; 364 + 365 + if (isr_err < 0 || ecc_err < 0) 366 + err = -EIO; 299 367 300 368 return err; 301 369 } ··· 860 760 case XSDFEC_GET_CONFIG: 861 761 rval = xsdfec_get_config(xsdfec, arg); 862 762 break; 763 + case XSDFEC_SET_IRQ: 764 + rval = xsdfec_set_irq(xsdfec, arg); 765 + break; 863 766 case XSDFEC_SET_TURBO: 864 767 rval = xsdfec_set_turbo(xsdfec, arg); 865 768 break; ··· 896 793 } 897 794 #endif 898 795 796 + static unsigned int xsdfec_poll(struct file *file, poll_table *wait) 797 + { 798 + unsigned int mask = 0; 799 + struct xsdfec_dev *xsdfec; 800 + 801 + xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev); 802 + 803 + if (!xsdfec) 804 + return POLLNVAL | POLLHUP; 805 + 806 + poll_wait(file, &xsdfec->waitq, wait); 807 + 808 + /* XSDFEC ISR detected an error */ 809 + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); 810 + if (xsdfec->state_updated) 811 + mask |= POLLIN | POLLPRI; 812 + 813 + if (xsdfec->stats_updated) 814 + mask |= POLLIN | POLLRDNORM; 815 + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); 816 + 817 + return mask; 818 + } 819 + 899 820 static const struct file_operations xsdfec_fops = { 900 821 .owner = THIS_MODULE, 901 822 .open = xsdfec_dev_open, 902 823 .release = xsdfec_dev_release, 903 824 .unlocked_ioctl = xsdfec_dev_ioctl, 825 + .poll = xsdfec_poll, 904 826 #ifdef CONFIG_COMPAT 905 827 .compat_ioctl = xsdfec_dev_compat_ioctl, 906 828 #endif ··· 1009 881 xsdfec_cfg_axi_streams(xsdfec); 1010 882 1011 883 return 0; 884 + } 885 + 886 + static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id) 887 + { 888 + struct xsdfec_dev *xsdfec = dev_id; 889 + irqreturn_t ret = IRQ_HANDLED; 890 + u32 ecc_err; 891 + u32 isr_err; 892 + u32 uecc_count; 893 + 
u32 cecc_count; 894 + u32 isr_err_count; 895 + u32 aecc_count; 896 + u32 tmp; 897 + 898 + WARN_ON(xsdfec->irq != irq); 899 + 900 + /* Mask Interrupts */ 901 + xsdfec_isr_enable(xsdfec, false); 902 + xsdfec_ecc_isr_enable(xsdfec, false); 903 + /* Read ISR */ 904 + ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR); 905 + isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR); 906 + /* Clear the interrupts */ 907 + xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err); 908 + xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err); 909 + 910 + tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK; 911 + /* Count uncorrectable 2-bit errors */ 912 + uecc_count = hweight32(tmp); 913 + /* Count all ECC errors */ 914 + aecc_count = hweight32(ecc_err); 915 + /* Number of correctable 1-bit ECC error */ 916 + cecc_count = aecc_count - 2 * uecc_count; 917 + /* Count ISR errors */ 918 + isr_err_count = hweight32(isr_err); 919 + dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp, 920 + uecc_count, aecc_count, cecc_count, isr_err_count); 921 + dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count, 922 + xsdfec->cecc_count, xsdfec->isr_err_count); 923 + 924 + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); 925 + /* Add new errors to a 2-bits counter */ 926 + if (uecc_count) 927 + xsdfec->uecc_count += uecc_count; 928 + /* Add new errors to a 1-bits counter */ 929 + if (cecc_count) 930 + xsdfec->cecc_count += cecc_count; 931 + /* Add new errors to a ISR counter */ 932 + if (isr_err_count) 933 + xsdfec->isr_err_count += isr_err_count; 934 + 935 + /* Update state/stats flag */ 936 + if (uecc_count) { 937 + if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK) 938 + xsdfec->state = XSDFEC_NEEDS_RESET; 939 + else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK) 940 + xsdfec->state = XSDFEC_PL_RECONFIGURE; 941 + xsdfec->stats_updated = true; 942 + xsdfec->state_updated = true; 943 + } 944 + 945 + if (cecc_count) 946 + xsdfec->stats_updated = true; 947 + 948 + if 
(isr_err_count) { 949 + xsdfec->state = XSDFEC_NEEDS_RESET; 950 + xsdfec->stats_updated = true; 951 + xsdfec->state_updated = true; 952 + } 953 + 954 + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); 955 + dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated, 956 + xsdfec->stats_updated); 957 + 958 + /* Enable another polling */ 959 + if (xsdfec->state_updated || xsdfec->stats_updated) 960 + wake_up_interruptible(&xsdfec->waitq); 961 + else 962 + ret = IRQ_NONE; 963 + 964 + /* Unmask Interrupts */ 965 + xsdfec_isr_enable(xsdfec, true); 966 + xsdfec_ecc_isr_enable(xsdfec, true); 967 + 968 + return ret; 1012 969 } 1013 970 1014 971 static int xsdfec_clk_init(struct platform_device *pdev, ··· 1262 1049 struct device *dev; 1263 1050 struct resource *res; 1264 1051 int err; 1052 + bool irq_enabled = true; 1265 1053 1266 1054 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL); 1267 1055 if (!xsdfec) ··· 1283 1069 goto err_xsdfec_dev; 1284 1070 } 1285 1071 1072 + xsdfec->irq = platform_get_irq(pdev, 0); 1073 + if (xsdfec->irq < 0) { 1074 + dev_dbg(dev, "platform_get_irq failed"); 1075 + irq_enabled = false; 1076 + } 1077 + 1286 1078 err = xsdfec_parse_of(xsdfec); 1287 1079 if (err < 0) 1288 1080 goto err_xsdfec_dev; ··· 1297 1077 1298 1078 /* Save driver private data */ 1299 1079 platform_set_drvdata(pdev, xsdfec); 1080 + 1081 + if (irq_enabled) { 1082 + init_waitqueue_head(&xsdfec->waitq); 1083 + /* Register IRQ thread */ 1084 + err = devm_request_threaded_irq(dev, xsdfec->irq, NULL, 1085 + xsdfec_irq_thread, IRQF_ONESHOT, 1086 + "xilinx-sdfec16", xsdfec); 1087 + if (err < 0) { 1088 + dev_err(dev, "unable to request IRQ%d", xsdfec->irq); 1089 + goto err_xsdfec_dev; 1090 + } 1091 + } 1300 1092 1301 1093 mutex_lock(&dev_idr_lock); 1302 1094 err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL);
+13
include/uapi/misc/xilinx_sdfec.h
··· 252 252 */ 253 253 #define XSDFEC_MAGIC 'f' 254 254 /** 255 + * DOC: XSDFEC_SET_IRQ 256 + * @Parameters 257 + * 258 + * @struct xsdfec_irq * 259 + * Pointer to the &struct xsdfec_irq that contains the interrupt settings 260 + * for the SD-FEC core 261 + * 262 + * @Description 263 + * 264 + * ioctl to enable or disable irq 265 + */ 266 + #define XSDFEC_SET_IRQ _IOW(XSDFEC_MAGIC, 3, struct xsdfec_irq) 267 + /** 255 268 * DOC: XSDFEC_SET_TURBO 256 269 * @Parameters 257 270 *