Lines matching refs: xsdfec

234 static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,  in xsdfec_regwrite()  argument
237 dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr); in xsdfec_regwrite()
238 iowrite32(value, xsdfec->regs + addr); in xsdfec_regwrite()
241 static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr) in xsdfec_regread() argument
245 rval = ioread32(xsdfec->regs + addr); in xsdfec_regread()
246 dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr); in xsdfec_regread()
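
The two fragments above are the driver's only MMIO accessors; everything else in this listing goes through them. A minimal reconstruction, assuming struct xsdfec_dev carries the ioremapped register base as regs and the device pointer as dev (the only fields these lines reference):

/*
 * Sketch of the SD-FEC register accessors, reconstructed from the
 * fragments above. The struct layout is an assumption limited to the
 * two fields visible in this listing.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/device.h>

struct xsdfec_dev {
        void __iomem *regs;     /* ioremapped SD-FEC register space */
        struct device *dev;     /* used for dev_dbg() tracing */
        /* ... remaining fields elided ... */
};

static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
                                   u32 value)
{
        dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
        iowrite32(value, xsdfec->regs + addr);
}

static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
        u32 rval;

        rval = ioread32(xsdfec->regs + addr);
        dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
        return rval;
}
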
250 static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec, in update_bool_config_from_reg() argument
257 reg_val = xsdfec_regread(xsdfec, reg_offset); in update_bool_config_from_reg()
261 static void update_config_from_hw(struct xsdfec_dev *xsdfec) in update_config_from_hw() argument
267 reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR); in update_config_from_hw()
268 xsdfec->config.order = reg_value; in update_config_from_hw()
270 update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR, in update_config_from_hw()
272 &xsdfec->config.bypass); in update_config_from_hw()
274 update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR, in update_config_from_hw()
276 &xsdfec->config.code_wr_protect); in update_config_from_hw()
278 reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); in update_config_from_hw()
279 xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0; in update_config_from_hw()
281 reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); in update_config_from_hw()
282 xsdfec->config.irq.enable_ecc_isr = in update_config_from_hw()
285 reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR); in update_config_from_hw()
288 xsdfec->state = XSDFEC_STARTED; in update_config_from_hw()
290 xsdfec->state = XSDFEC_STOPPED; in update_config_from_hw()
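
update_config_from_hw() rebuilds the cached struct xsdfec_config from the registers so the driver's copy matches whatever state the hardware was left in: order, bypass, code write protect, both interrupt masks, and the started/stopped state from the AXIS enable register. The helper it calls twice is only partially visible; a sketch, assuming it takes a bit index and stores the result into the config field the caller passes:

/*
 * Sketch only: the bit_num parameter and the char destination type are
 * assumptions; the listing shows just the register read and the config
 * fields being updated.
 */
static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
                                        u32 reg_offset, u32 bit_num,
                                        char *config_value)
{
        u32 bit_mask = 1 << bit_num;
        u32 reg_val = xsdfec_regread(xsdfec, reg_offset);

        /* Cache the selected bit as a 0/1 flag in the driver's config copy */
        *config_value = (reg_val & bit_mask) > 0;
}
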
293 static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_get_status() argument
299 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_get_status()
300 status.state = xsdfec->state; in xsdfec_get_status()
301 xsdfec->state_updated = false; in xsdfec_get_status()
302 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_get_status()
303 status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) & in xsdfec_get_status()
313 static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_get_config() argument
317 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config)); in xsdfec_get_config()
324 static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable) in xsdfec_isr_enable() argument
330 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK); in xsdfec_isr_enable()
331 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); in xsdfec_isr_enable()
333 dev_dbg(xsdfec->dev, in xsdfec_isr_enable()
339 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK); in xsdfec_isr_enable()
340 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); in xsdfec_isr_enable()
342 dev_dbg(xsdfec->dev, in xsdfec_isr_enable()
350 static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable) in xsdfec_ecc_isr_enable() argument
356 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR, in xsdfec_ecc_isr_enable()
358 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); in xsdfec_ecc_isr_enable()
360 dev_dbg(xsdfec->dev, in xsdfec_ecc_isr_enable()
366 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR, in xsdfec_ecc_isr_enable()
368 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); in xsdfec_ecc_isr_enable()
373 dev_dbg(xsdfec->dev, in xsdfec_ecc_isr_enable()
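
Both xsdfec_isr_enable() and xsdfec_ecc_isr_enable() follow the usual Xilinx interrupt-register convention: write the mask to the Interrupt Enable Register (IER) to unmask sources, write it to the Interrupt Disable Register (IDR) to mask them, then read the Interrupt Mask Register (IMR) back to confirm the request took effect. A condensed sketch of the non-ECC variant; the register names come from the lines above, while the verification branches and return codes are reconstructed and should be treated as assumptions:

static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
        u32 mask_read;

        if (enable) {
                /* Unmask the sources, then read IMR to see that it stuck */
                xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
                mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
                if (mask_read & XSDFEC_ISR_MASK) {
                        dev_dbg(xsdfec->dev,
                                "SDFEC enabling irq with IER failed");
                        return -EIO;
                }
        } else {
                /* Mask the sources and verify the same way */
                xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
                mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
                if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
                        dev_dbg(xsdfec->dev,
                                "SDFEC disabling irq with IDR failed");
                        return -EIO;
                }
        }
        return 0;
}
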
381 static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_set_irq() argument
393 isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr); in xsdfec_set_irq()
395 xsdfec->config.irq.enable_isr = irq.enable_isr; in xsdfec_set_irq()
398 ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr); in xsdfec_set_irq()
400 xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr; in xsdfec_set_irq()
408 static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_set_turbo() argument
425 if (xsdfec->config.code == XSDFEC_LDPC_CODE) in xsdfec_set_turbo()
431 xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write); in xsdfec_set_turbo()
435 static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_get_turbo() argument
441 if (xsdfec->config.code == XSDFEC_LDPC_CODE) in xsdfec_get_turbo()
445 reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR); in xsdfec_get_turbo()
458 static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize, in xsdfec_reg0_write() argument
465 dev_dbg(xsdfec->dev, "N value is not in range"); in xsdfec_reg0_write()
472 dev_dbg(xsdfec->dev, "K value is not in range"); in xsdfec_reg0_write()
480 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x", in xsdfec_reg0_write()
485 xsdfec_regwrite(xsdfec, in xsdfec_reg0_write()
492 static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize, in xsdfec_reg1_write() argument
498 dev_dbg(xsdfec->dev, "Psize is not in range"); in xsdfec_reg1_write()
503 dev_dbg(xsdfec->dev, "No-packing bit register invalid"); in xsdfec_reg1_write()
508 dev_dbg(xsdfec->dev, "NM is beyond 10 bits"); in xsdfec_reg1_write()
514 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x", in xsdfec_reg1_write()
519 xsdfec_regwrite(xsdfec, in xsdfec_reg1_write()
526 static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc, in xsdfec_reg2_write() argument
534 dev_dbg(xsdfec->dev, "Nlayers is not in range"); in xsdfec_reg2_write()
539 dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits"); in xsdfec_reg2_write()
543 dev_dbg(xsdfec->dev, "Norm type is invalid"); in xsdfec_reg2_write()
547 dev_dbg(xsdfec->dev, "Special QC is invalid"); in xsdfec_reg2_write()
552 dev_dbg(xsdfec->dev, "No final parity check invalid"); in xsdfec_reg2_write()
558 dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits"); in xsdfec_reg2_write()
567 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x", in xsdfec_reg2_write()
572 xsdfec_regwrite(xsdfec, in xsdfec_reg2_write()
579 static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off, in xsdfec_reg3_write() argument
588 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x", in xsdfec_reg3_write()
593 xsdfec_regwrite(xsdfec, in xsdfec_reg3_write()
600 static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, in xsdfec_table_write() argument
617 dev_dbg(xsdfec->dev, "Write exceeds SC table length"); in xsdfec_table_write()
641 xsdfec_regwrite(xsdfec, in xsdfec_table_write()
654 static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_add_ldpc() argument
663 if (xsdfec->config.code == XSDFEC_TURBO_CODE) { in xsdfec_add_ldpc()
669 if (xsdfec->state == XSDFEC_STARTED) { in xsdfec_add_ldpc()
674 if (xsdfec->config.code_wr_protect) { in xsdfec_add_ldpc()
680 ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize, in xsdfec_add_ldpc()
686 ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm, in xsdfec_add_ldpc()
692 ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc, in xsdfec_add_ldpc()
700 ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off, in xsdfec_add_ldpc()
710 ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n, in xsdfec_add_ldpc()
716 ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table, in xsdfec_add_ldpc()
722 ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table, in xsdfec_add_ldpc()
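
xsdfec_add_ldpc() programs one LDPC code after checking that the core is an LDPC core, is stopped, and is not write-protected: reg0-reg3 carry the scalar code parameters (n, k, psize, packing, layers, offsets), and the SC, LA and QC tables are then written word by word into their table regions. The inner loop of xsdfec_table_write() boils down to the sketch below; xsdfec_table_write_words is a hypothetical helper name, and the user-copy, paging and depth checks of the real function are deliberately left out:

/* Illustrative only: stream a u32 table into a register window, one
 * 32-bit entry per 4-byte slot starting at base_addr + offset. */
static void xsdfec_table_write_words(struct xsdfec_dev *xsdfec, u32 base_addr,
                                     u32 offset, const u32 *table, u32 len)
{
        u32 i;

        for (i = 0; i < len; i++)
                xsdfec_regwrite(xsdfec,
                                base_addr + (offset + i) * sizeof(u32),
                                table[i]);
}
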
730 static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_set_order() argument
746 if (xsdfec->state == XSDFEC_STARTED) in xsdfec_set_order()
749 xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order); in xsdfec_set_order()
751 xsdfec->config.order = order; in xsdfec_set_order()
756 static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg) in xsdfec_set_bypass() argument
766 if (xsdfec->state == XSDFEC_STARTED) in xsdfec_set_bypass()
770 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1); in xsdfec_set_bypass()
772 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0); in xsdfec_set_bypass()
774 xsdfec->config.bypass = bypass; in xsdfec_set_bypass()
779 static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg) in xsdfec_is_active() argument
785 reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR); in xsdfec_is_active()
829 static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec) in xsdfec_cfg_axi_streams() argument
836 struct xsdfec_config *config = &xsdfec->config; in xsdfec_cfg_axi_streams()
853 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value); in xsdfec_cfg_axi_streams()
868 static int xsdfec_start(struct xsdfec_dev *xsdfec) in xsdfec_start() argument
872 regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR); in xsdfec_start()
874 if (regread != xsdfec->config.code) { in xsdfec_start()
875 dev_dbg(xsdfec->dev, in xsdfec_start()
877 __func__, regread, xsdfec->config.code); in xsdfec_start()
882 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, in xsdfec_start()
885 xsdfec->state = XSDFEC_STARTED; in xsdfec_start()
889 static int xsdfec_stop(struct xsdfec_dev *xsdfec) in xsdfec_stop() argument
893 if (xsdfec->state != XSDFEC_STARTED) in xsdfec_stop()
894 dev_dbg(xsdfec->dev, "Device not started correctly"); in xsdfec_stop()
896 regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR); in xsdfec_stop()
898 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread); in xsdfec_stop()
900 xsdfec->state = XSDFEC_STOPPED; in xsdfec_stop()
904 static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec) in xsdfec_clear_stats() argument
906 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_clear_stats()
907 xsdfec->isr_err_count = 0; in xsdfec_clear_stats()
908 xsdfec->uecc_count = 0; in xsdfec_clear_stats()
909 xsdfec->cecc_count = 0; in xsdfec_clear_stats()
910 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_clear_stats()
915 static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg) in xsdfec_get_stats() argument
920 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_get_stats()
921 user_stats.isr_err_count = xsdfec->isr_err_count; in xsdfec_get_stats()
922 user_stats.cecc_count = xsdfec->cecc_count; in xsdfec_get_stats()
923 user_stats.uecc_count = xsdfec->uecc_count; in xsdfec_get_stats()
924 xsdfec->stats_updated = false; in xsdfec_get_stats()
925 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_get_stats()
934 static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec) in xsdfec_set_default_config() argument
937 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code); in xsdfec_set_default_config()
938 xsdfec_cfg_axi_streams(xsdfec); in xsdfec_set_default_config()
939 update_config_from_hw(xsdfec); in xsdfec_set_default_config()
947 struct xsdfec_dev *xsdfec; in xsdfec_dev_ioctl() local
951 xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev); in xsdfec_dev_ioctl()
954 if (xsdfec->state == XSDFEC_NEEDS_RESET && in xsdfec_dev_ioctl()
962 rval = xsdfec_start(xsdfec); in xsdfec_dev_ioctl()
965 rval = xsdfec_stop(xsdfec); in xsdfec_dev_ioctl()
968 rval = xsdfec_clear_stats(xsdfec); in xsdfec_dev_ioctl()
971 rval = xsdfec_get_stats(xsdfec, arg); in xsdfec_dev_ioctl()
974 rval = xsdfec_get_status(xsdfec, arg); in xsdfec_dev_ioctl()
977 rval = xsdfec_get_config(xsdfec, arg); in xsdfec_dev_ioctl()
980 rval = xsdfec_set_default_config(xsdfec); in xsdfec_dev_ioctl()
983 rval = xsdfec_set_irq(xsdfec, arg); in xsdfec_dev_ioctl()
986 rval = xsdfec_set_turbo(xsdfec, arg); in xsdfec_dev_ioctl()
989 rval = xsdfec_get_turbo(xsdfec, arg); in xsdfec_dev_ioctl()
992 rval = xsdfec_add_ldpc(xsdfec, arg); in xsdfec_dev_ioctl()
995 rval = xsdfec_set_order(xsdfec, arg); in xsdfec_dev_ioctl()
998 rval = xsdfec_set_bypass(xsdfec, arg); in xsdfec_dev_ioctl()
1001 rval = xsdfec_is_active(xsdfec, (bool __user *)arg); in xsdfec_dev_ioctl()
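
The switch above is the whole character-device API: one ioctl per operation on /dev/xsdfecN. A minimal user-space session is sketched below; the XSDFEC_* ioctl numbers and the xsdfec_status/xsdfec_stats structures come from the SD-FEC UAPI header, which is not part of this listing, so the header path and field names are assumptions:

/* User-space sketch: start the core, read status and stats, stop it. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/xilinx_sdfec.h>  /* assumed install path of the UAPI header */

int main(void)
{
        struct xsdfec_status status;
        struct xsdfec_stats stats;
        int fd = open("/dev/xsdfec0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/xsdfec0");
                return 1;
        }

        if (ioctl(fd, XSDFEC_START_DEV) < 0)
                perror("XSDFEC_START_DEV");

        if (ioctl(fd, XSDFEC_GET_STATUS, &status) == 0)
                printf("state=%u activity=%d\n", status.state, status.activity);

        if (ioctl(fd, XSDFEC_GET_STATS, &stats) == 0)
                printf("isr_err=%u cecc=%u uecc=%u\n", stats.isr_err_count,
                       stats.cecc_count, stats.uecc_count);

        ioctl(fd, XSDFEC_STOP_DEV);
        close(fd);
        return 0;
}
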
1013 struct xsdfec_dev *xsdfec; in xsdfec_poll() local
1015 xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev); in xsdfec_poll()
1017 poll_wait(file, &xsdfec->waitq, wait); in xsdfec_poll()
1020 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_poll()
1021 if (xsdfec->state_updated) in xsdfec_poll()
1024 if (xsdfec->stats_updated) in xsdfec_poll()
1026 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_poll()
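
xsdfec_poll() wakes sleepers on the device wait queue whenever the IRQ thread sets state_updated or stats_updated, so user space can block in poll() instead of spinning on the ioctls. A sketch of the consumer side; treating the events as POLLIN/POLLPRI is an assumption, since the listing does not show which event bits the driver returns:

/* User-space sketch: block until the SD-FEC reports an event on an fd
 * already opened on /dev/xsdfecN, then fetch the error counters. */
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/xilinx_sdfec.h>  /* assumed install path of the UAPI header */

static int wait_for_sdfec_event(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
        struct xsdfec_stats stats;

        if (poll(&pfd, 1, -1) <= 0)
                return -1;

        if ((pfd.revents & (POLLIN | POLLPRI)) &&
            ioctl(fd, XSDFEC_GET_STATS, &stats) == 0)
                printf("isr_err=%u cecc=%u uecc=%u\n", stats.isr_err_count,
                       stats.cecc_count, stats.uecc_count);
        return 0;
}
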
1040 static int xsdfec_parse_of(struct xsdfec_dev *xsdfec) in xsdfec_parse_of() argument
1042 struct device *dev = xsdfec->dev; in xsdfec_parse_of()
1056 xsdfec->config.code = XSDFEC_LDPC_CODE; in xsdfec_parse_of()
1058 xsdfec->config.code = XSDFEC_TURBO_CODE; in xsdfec_parse_of()
1068 xsdfec->config.din_word_include = din_word_include; in xsdfec_parse_of()
1081 xsdfec->config.din_width = din_width; in xsdfec_parse_of()
1093 xsdfec->config.dout_word_include = dout_word_include; in xsdfec_parse_of()
1106 xsdfec->config.dout_width = dout_width; in xsdfec_parse_of()
1113 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code); in xsdfec_parse_of()
1115 xsdfec_cfg_axi_streams(xsdfec); in xsdfec_parse_of()
1122 struct xsdfec_dev *xsdfec = dev_id; in xsdfec_irq_thread() local
1132 WARN_ON(xsdfec->irq != irq); in xsdfec_irq_thread()
1135 xsdfec_isr_enable(xsdfec, false); in xsdfec_irq_thread()
1136 xsdfec_ecc_isr_enable(xsdfec, false); in xsdfec_irq_thread()
1138 ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR); in xsdfec_irq_thread()
1139 isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR); in xsdfec_irq_thread()
1141 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err); in xsdfec_irq_thread()
1142 xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err); in xsdfec_irq_thread()
1153 dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp, in xsdfec_irq_thread()
1155 dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count, in xsdfec_irq_thread()
1156 xsdfec->cecc_count, xsdfec->isr_err_count); in xsdfec_irq_thread()
1158 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_irq_thread()
1161 xsdfec->uecc_count += uecc_count; in xsdfec_irq_thread()
1164 xsdfec->cecc_count += cecc_count; in xsdfec_irq_thread()
1167 xsdfec->isr_err_count += isr_err_count; in xsdfec_irq_thread()
1172 xsdfec->state = XSDFEC_NEEDS_RESET; in xsdfec_irq_thread()
1174 xsdfec->state = XSDFEC_PL_RECONFIGURE; in xsdfec_irq_thread()
1175 xsdfec->stats_updated = true; in xsdfec_irq_thread()
1176 xsdfec->state_updated = true; in xsdfec_irq_thread()
1180 xsdfec->stats_updated = true; in xsdfec_irq_thread()
1183 xsdfec->state = XSDFEC_NEEDS_RESET; in xsdfec_irq_thread()
1184 xsdfec->stats_updated = true; in xsdfec_irq_thread()
1185 xsdfec->state_updated = true; in xsdfec_irq_thread()
1188 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); in xsdfec_irq_thread()
1189 dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated, in xsdfec_irq_thread()
1190 xsdfec->stats_updated); in xsdfec_irq_thread()
1193 if (xsdfec->state_updated || xsdfec->stats_updated) in xsdfec_irq_thread()
1194 wake_up_interruptible(&xsdfec->waitq); in xsdfec_irq_thread()
1199 xsdfec_isr_enable(xsdfec, true); in xsdfec_irq_thread()
1200 xsdfec_ecc_isr_enable(xsdfec, true); in xsdfec_irq_thread()
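
The threaded handler follows a common pattern: mask both interrupt sources, read ISR and ECC_ISR and acknowledge them by writing the values back, fold the error bits into the counters under error_data_lock, update the device state, wake any poll() sleepers, then unmask. A control-flow sketch using the helpers shown earlier; the per-bit decoding of the ECC status into uecc/cecc counts is only partially visible above and is elided here:

#include <linux/interrupt.h>

static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
        struct xsdfec_dev *xsdfec = dev_id;
        u32 ecc_err, isr_err;

        /* Mask both sources while the latched status is processed */
        xsdfec_isr_enable(xsdfec, false);
        xsdfec_ecc_isr_enable(xsdfec, false);

        /* Read the status registers and acknowledge by writing back */
        ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
        isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
        xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
        xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

        /* Counters and state share error_data_lock with the ioctl and
         * poll paths */
        spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
        /* ... decode ecc_err/isr_err into uecc/cecc/isr_err counts and,
         *     on uncorrectable errors, move to XSDFEC_NEEDS_RESET ... */
        spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

        /* Tell blocked poll()ers that state or stats changed */
        if (xsdfec->state_updated || xsdfec->stats_updated)
                wake_up_interruptible(&xsdfec->waitq);

        /* Unmask again before returning */
        xsdfec_isr_enable(xsdfec, true);
        xsdfec_ecc_isr_enable(xsdfec, true);

        return IRQ_HANDLED;
}
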
1360 struct xsdfec_dev *xsdfec; in xsdfec_probe() local
1366 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL); in xsdfec_probe()
1367 if (!xsdfec) in xsdfec_probe()
1370 xsdfec->dev = &pdev->dev; in xsdfec_probe()
1371 spin_lock_init(&xsdfec->error_data_lock); in xsdfec_probe()
1373 err = xsdfec_clk_init(pdev, &xsdfec->clks); in xsdfec_probe()
1377 dev = xsdfec->dev; in xsdfec_probe()
1379 xsdfec->regs = devm_ioremap_resource(dev, res); in xsdfec_probe()
1380 if (IS_ERR(xsdfec->regs)) { in xsdfec_probe()
1381 err = PTR_ERR(xsdfec->regs); in xsdfec_probe()
1385 xsdfec->irq = platform_get_irq(pdev, 0); in xsdfec_probe()
1386 if (xsdfec->irq < 0) { in xsdfec_probe()
1391 err = xsdfec_parse_of(xsdfec); in xsdfec_probe()
1395 update_config_from_hw(xsdfec); in xsdfec_probe()
1398 platform_set_drvdata(pdev, xsdfec); in xsdfec_probe()
1401 init_waitqueue_head(&xsdfec->waitq); in xsdfec_probe()
1403 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL, in xsdfec_probe()
1405 "xilinx-sdfec16", xsdfec); in xsdfec_probe()
1407 dev_err(dev, "unable to request IRQ%d", xsdfec->irq); in xsdfec_probe()
1415 xsdfec->dev_id = err; in xsdfec_probe()
1417 snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id); in xsdfec_probe()
1418 xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR; in xsdfec_probe()
1419 xsdfec->miscdev.name = xsdfec->dev_name; in xsdfec_probe()
1420 xsdfec->miscdev.fops = &xsdfec_fops; in xsdfec_probe()
1421 xsdfec->miscdev.parent = dev; in xsdfec_probe()
1422 err = misc_register(&xsdfec->miscdev); in xsdfec_probe()
1430 ida_free(&dev_nrs, xsdfec->dev_id); in xsdfec_probe()
1432 xsdfec_disable_all_clks(&xsdfec->clks); in xsdfec_probe()
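
The probe fragments show the usual lifecycle: devm_kzalloc() the per-device structure, map the registers, parse the device tree, then attach the interrupt and expose a dynamically-numbered misc device. Because the primary handler passed to devm_request_threaded_irq() is NULL, all interrupt work runs in the thread sketched above. A trimmed sketch of that tail end of probe; IRQF_ONESHOT and the error-unwind label names are assumptions, everything else mirrors the fragments:

        /* No primary handler: the threaded handler does all the work.
         * IRQF_ONESHOT (assumed) keeps the line masked until it returns. */
        err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
                                        xsdfec_irq_thread, IRQF_ONESHOT,
                                        "xilinx-sdfec16", xsdfec);
        if (err < 0) {
                dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
                goto err_xsdfec_ida;            /* placeholder label */
        }

        /* Register /dev/xsdfecN with a dynamic misc minor */
        snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
        xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
        xsdfec->miscdev.name = xsdfec->dev_name;
        xsdfec->miscdev.fops = &xsdfec_fops;
        xsdfec->miscdev.parent = dev;
        err = misc_register(&xsdfec->miscdev);
        if (err) {
                dev_err(dev, "error registering misc device");
                goto err_xsdfec_ida;            /* placeholder label */
        }
        return 0;
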
1438 struct xsdfec_dev *xsdfec; in xsdfec_remove() local
1440 xsdfec = platform_get_drvdata(pdev); in xsdfec_remove()
1441 misc_deregister(&xsdfec->miscdev); in xsdfec_remove()
1442 ida_free(&dev_nrs, xsdfec->dev_id); in xsdfec_remove()
1443 xsdfec_disable_all_clks(&xsdfec->clks); in xsdfec_remove()