1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Xilinx CAN device driver
3 *
4 * Copyright (C) 2012 - 2022 Xilinx, Inc.
5 * Copyright (C) 2009 PetaLogix. All rights reserved.
6 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
7 *
8 * Description:
9 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/errno.h>
15 #include <linux/ethtool.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/platform_device.h>
25 #include <linux/skbuff.h>
26 #include <linux/spinlock.h>
27 #include <linux/string.h>
28 #include <linux/types.h>
29 #include <linux/can/dev.h>
30 #include <linux/can/error.h>
31 #include <linux/pm_runtime.h>
32
33 #define DRIVER_NAME "xilinx_can"
34
35 /* CAN registers set */
36 enum xcan_reg {
37 XCAN_SRR_OFFSET = 0x00, /* Software reset */
38 XCAN_MSR_OFFSET = 0x04, /* Mode select */
39 XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
40 XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
41 XCAN_ECR_OFFSET = 0x10, /* Error counter */
42 XCAN_ESR_OFFSET = 0x14, /* Error status */
43 XCAN_SR_OFFSET = 0x18, /* Status */
44 XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
45 XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
46 XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
47
48 /* not on CAN FD cores */
49 XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
50 XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
51 XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
52
53 /* only on CAN FD cores */
54 XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
55 * Prescaler
56 */
57 XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
58 XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
59 XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
60 XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
61 XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
62 XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
63 XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
64 XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */
65 XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */
66 };
67
68 #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
69 #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
70 #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
71 #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
72 #define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
73
74 #define XCAN_CANFD_FRAME_SIZE 0x48
75 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
76 XCAN_CANFD_FRAME_SIZE * (n))
77 #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
78 XCAN_CANFD_FRAME_SIZE * (n))
79 #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \
80 XCAN_CANFD_FRAME_SIZE * (n))
81
82 /* the single TX mailbox used by this driver on CAN FD HW */
83 #define XCAN_TX_MAILBOX_IDX 0
84
85 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
86 #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
87 #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
88 #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
89 #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
90 #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
91 #define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
92 #define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
93 #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
94 #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
95 #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
96 #define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
97 #define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
98 #define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
99 #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
100 #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
101 #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
102 #define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
103 #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
104 #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
105 #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
106 #define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
107 #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
108 #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
109 #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
110 #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
111 #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
112 #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
113 #define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
114 #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
115 #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
116 #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
117 #define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
118 #define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
119 #define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
120 #define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
121 #define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
122 #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
123 #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
124 #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
125 #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
126 #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
127 #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
128 #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
129 #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
130 #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
131 #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
132 #define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */
133 #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
134 #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
135 #define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
136 #define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
137 #define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
138
139 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
140 #define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
141 #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
142 #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
143 #define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
144 #define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
145 #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
146 #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
147 #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
148 #define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
149
150 /* CAN frame length constants */
151 #define XCAN_FRAME_MAX_DATA_LEN 8
152 #define XCANFD_DW_BYTES 4
153 #define XCAN_TIMEOUT (1 * HZ)
154
155 /* TX-FIFO-empty interrupt available */
156 #define XCAN_FLAG_TXFEMP 0x0001
157 /* RX Match Not Finished interrupt available */
158 #define XCAN_FLAG_RXMNF 0x0002
159 /* Extended acceptance filters with control at 0xE0 */
160 #define XCAN_FLAG_EXT_FILTERS 0x0004
161 /* TX mailboxes instead of TX FIFO */
162 #define XCAN_FLAG_TX_MAILBOXES 0x0008
163 /* RX FIFO with each buffer in separate registers at 0x1100
164 * instead of the regular FIFO at 0x50
165 */
166 #define XCAN_FLAG_RX_FIFO_MULTI 0x0010
167 #define XCAN_FLAG_CANFD_2 0x0020
168
169 enum xcan_ip_type {
170 XAXI_CAN = 0,
171 XZYNQ_CANPS,
172 XAXI_CANFD,
173 XAXI_CANFD_2_0,
174 };
175
176 struct xcan_devtype_data {
177 enum xcan_ip_type cantype;
178 unsigned int flags;
179 const struct can_bittiming_const *bittiming_const;
180 const char *bus_clk_name;
181 unsigned int btr_ts2_shift;
182 unsigned int btr_sjw_shift;
183 };
184
185 /**
186  * struct xcan_priv - CAN driver instance private data
187 * @can: CAN private data structure.
188 * @tx_lock: Lock for synchronizing TX interrupt handling
189 * @tx_head: Tx CAN packets ready to send on the queue
190  * @tx_tail:			Tx CAN packets successfully sent on the queue
191  * @tx_max:			Maximum number of packets the driver can send
192 * @napi: NAPI structure
193 * @read_reg: For reading data from CAN registers
194 * @write_reg: For writing data to CAN registers
195 * @dev: Network device data structure
196 * @reg_base: Ioremapped address to registers
197 * @irq_flags: For request_irq()
198 * @bus_clk: Pointer to struct clk
199 * @can_clk: Pointer to struct clk
200 * @devtype: Device type specific constants
201 */
202 struct xcan_priv {
203 struct can_priv can;
204 spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
205 unsigned int tx_head;
206 unsigned int tx_tail;
207 unsigned int tx_max;
208 struct napi_struct napi;
209 u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
210 void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
211 u32 val);
212 struct device *dev;
213 void __iomem *reg_base;
214 unsigned long irq_flags;
215 struct clk *bus_clk;
216 struct clk *can_clk;
217 struct xcan_devtype_data devtype;
218 };
219
220 /* CAN Bittiming constants as per Xilinx CAN specs */
221 static const struct can_bittiming_const xcan_bittiming_const = {
222 .name = DRIVER_NAME,
223 .tseg1_min = 1,
224 .tseg1_max = 16,
225 .tseg2_min = 1,
226 .tseg2_max = 8,
227 .sjw_max = 4,
228 .brp_min = 1,
229 .brp_max = 256,
230 .brp_inc = 1,
231 };
232
233 /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
234 static const struct can_bittiming_const xcan_bittiming_const_canfd = {
235 .name = DRIVER_NAME,
236 .tseg1_min = 1,
237 .tseg1_max = 64,
238 .tseg2_min = 1,
239 .tseg2_max = 16,
240 .sjw_max = 16,
241 .brp_min = 1,
242 .brp_max = 256,
243 .brp_inc = 1,
244 };
245
246 /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
247 static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
248 .name = DRIVER_NAME,
249 .tseg1_min = 1,
250 .tseg1_max = 16,
251 .tseg2_min = 1,
252 .tseg2_max = 8,
253 .sjw_max = 8,
254 .brp_min = 1,
255 .brp_max = 256,
256 .brp_inc = 1,
257 };
258
259 /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
260 static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
261 .name = DRIVER_NAME,
262 .tseg1_min = 1,
263 .tseg1_max = 256,
264 .tseg2_min = 1,
265 .tseg2_max = 128,
266 .sjw_max = 128,
267 .brp_min = 1,
268 .brp_max = 256,
269 .brp_inc = 1,
270 };
271
272 /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
273 static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
274 .name = DRIVER_NAME,
275 .tseg1_min = 1,
276 .tseg1_max = 32,
277 .tseg2_min = 1,
278 .tseg2_max = 16,
279 .sjw_max = 16,
280 .brp_min = 1,
281 .brp_max = 256,
282 .brp_inc = 1,
283 };
284
285 /* Transmission Delay Compensation constants for CANFD 1.0 */
286 static const struct can_tdc_const xcan_tdc_const_canfd = {
287 .tdcv_min = 0,
288 .tdcv_max = 0, /* Manual mode not supported. */
289 .tdco_min = 0,
290 .tdco_max = 32,
291 .tdcf_min = 0, /* Filter window not supported */
292 .tdcf_max = 0,
293 };
294
295 /* Transmission Delay Compensation constants for CANFD 2.0 */
296 static const struct can_tdc_const xcan_tdc_const_canfd2 = {
297 .tdcv_min = 0,
298 .tdcv_max = 0, /* Manual mode not supported. */
299 .tdco_min = 0,
300 .tdco_max = 64,
301 .tdcf_min = 0, /* Filter window not supported */
302 .tdcf_max = 0,
303 };
304
305 /**
306 * xcan_write_reg_le - Write a value to the device register little endian
307 * @priv: Driver private data structure
308 * @reg: Register offset
309 * @val: Value to write at the Register offset
310 *
311  * Write data to the particular CAN register
312 */
313 static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
314 u32 val)
315 {
316 iowrite32(val, priv->reg_base + reg);
317 }
318
319 /**
320 * xcan_read_reg_le - Read a value from the device register little endian
321 * @priv: Driver private data structure
322 * @reg: Register offset
323 *
324 * Read data from the particular CAN register
325 * Return: value read from the CAN register
326 */
327 static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
328 {
329 return ioread32(priv->reg_base + reg);
330 }
331
332 /**
333 * xcan_write_reg_be - Write a value to the device register big endian
334 * @priv: Driver private data structure
335 * @reg: Register offset
336 * @val: Value to write at the Register offset
337 *
338  * Write data to the particular CAN register
339 */
340 static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
341 u32 val)
342 {
343 iowrite32be(val, priv->reg_base + reg);
344 }
345
346 /**
347 * xcan_read_reg_be - Read a value from the device register big endian
348 * @priv: Driver private data structure
349 * @reg: Register offset
350 *
351 * Read data from the particular CAN register
352 * Return: value read from the CAN register
353 */
354 static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
355 {
356 return ioread32be(priv->reg_base + reg);
357 }
358
359 /**
360 * xcan_rx_int_mask - Get the mask for the receive interrupt
361 * @priv: Driver private data structure
362 *
363 * Return: The receive interrupt mask used by the driver on this HW
364 */
365 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
366 {
367 /* RXNEMP is better suited for our use case as it cannot be cleared
368 * while the FIFO is non-empty, but CAN FD HW does not have it
369 */
370 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
371 return XCAN_IXR_RXOK_MASK;
372 else
373 return XCAN_IXR_RXNEMP_MASK;
374 }
375
376 /**
377 * set_reset_mode - Resets the CAN device mode
378 * @ndev: Pointer to net_device structure
379 *
380  * This is the driver reset mode routine. The driver
381 * enters into configuration mode.
382 *
383 * Return: 0 on success and failure value on error
384 */
385 static int set_reset_mode(struct net_device *ndev)
386 {
387 struct xcan_priv *priv = netdev_priv(ndev);
388 unsigned long timeout;
389
390 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
391
392 timeout = jiffies + XCAN_TIMEOUT;
393 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
394 if (time_after(jiffies, timeout)) {
395 netdev_warn(ndev, "timed out for config mode\n");
396 return -ETIMEDOUT;
397 }
398 usleep_range(500, 10000);
399 }
400
401 /* reset clears FIFOs */
402 priv->tx_head = 0;
403 priv->tx_tail = 0;
404
405 return 0;
406 }
407
408 /**
409 * xcan_set_bittiming - CAN set bit timing routine
410 * @ndev: Pointer to net_device structure
411 *
412 * This is the driver set bittiming routine.
413 * Return: 0 on success and failure value on error
414 */
415 static int xcan_set_bittiming(struct net_device *ndev)
416 {
417 struct xcan_priv *priv = netdev_priv(ndev);
418 struct can_bittiming *bt = &priv->can.bittiming;
419 struct can_bittiming *dbt = &priv->can.data_bittiming;
420 u32 btr0, btr1;
421 u32 is_config_mode;
422
423 /* Check whether Xilinx CAN is in configuration mode.
424 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
425 */
426 is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
427 XCAN_SR_CONFIG_MASK;
428 if (!is_config_mode) {
429 netdev_alert(ndev,
430 "BUG! Cannot set bittiming - CAN is not in config mode\n");
431 return -EPERM;
432 }
433
434 /* Setting Baud Rate prescaler value in BRPR Register */
435 btr0 = (bt->brp - 1);
436
437 /* Setting Time Segment 1 in BTR Register */
438 btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
439
440 /* Setting Time Segment 2 in BTR Register */
441 btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
442
443 /* Setting Synchronous jump width in BTR Register */
444 btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
445
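	/* Example (illustrative values only, not taken from the datasheet):
	 * with a 40 MHz can_clk and a 500 kbit/s nominal rate, one possible
	 * setting is brp = 5 (tq = 125 ns), prop_seg + phase_seg1 = 12 and
	 * phase_seg2 = 3, i.e. 16 tq per bit; the registers then receive
	 * BRPR = 4 and the "- 1" encoded segment/SJW values computed above.
	 */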
446 priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
447 priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
448
449 if (priv->devtype.cantype == XAXI_CANFD ||
450 priv->devtype.cantype == XAXI_CANFD_2_0) {
451 /* Setting Baud Rate prescaler value in F_BRPR Register */
452 btr0 = dbt->brp - 1;
453 if (can_tdc_is_enabled(&priv->can)) {
454 if (priv->devtype.cantype == XAXI_CANFD)
455 btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
456 XCAN_BRPR_TDC_ENABLE;
457 else
458 btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
459 XCAN_BRPR_TDC_ENABLE;
460 }
461
462 /* Setting Time Segment 1 in BTR Register */
463 btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
464
465 /* Setting Time Segment 2 in BTR Register */
466 btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
467
468 /* Setting Synchronous jump width in BTR Register */
469 btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
470
471 priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
472 priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
473 }
474
475 netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
476 priv->read_reg(priv, XCAN_BRPR_OFFSET),
477 priv->read_reg(priv, XCAN_BTR_OFFSET));
478
479 return 0;
480 }
481
482 /**
483  * xcan_chip_start - Driver start routine
484 * @ndev: Pointer to net_device structure
485 *
486  * This is the driver's start routine.
487  * Based on the state of the CAN device it puts
488 * the CAN device into a proper mode.
489 *
490 * Return: 0 on success and failure value on error
491 */
492 static int xcan_chip_start(struct net_device *ndev)
493 {
494 struct xcan_priv *priv = netdev_priv(ndev);
495 u32 reg_msr;
496 int err;
497 u32 ier;
498
499 /* Check if it is in reset mode */
500 err = set_reset_mode(ndev);
501 if (err < 0)
502 return err;
503
504 err = xcan_set_bittiming(ndev);
505 if (err < 0)
506 return err;
507
508 /* Enable interrupts
509 *
510 * We enable the ERROR interrupt even with
511 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
512 * dedicated interrupt for a state change to
513 * ERROR_WARNING/ERROR_PASSIVE.
514 */
515 ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
516 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
517 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
518 XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
519
520 if (priv->devtype.flags & XCAN_FLAG_RXMNF)
521 ier |= XCAN_IXR_RXMNF_MASK;
522
523 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
524
525 /* Check whether it is loopback mode or normal mode */
526 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
527 reg_msr = XCAN_MSR_LBACK_MASK;
528 else
529 reg_msr = 0x0;
530
531 /* enable the first extended filter, if any, as cores with extended
532 * filtering default to non-receipt if all filters are disabled
533 */
534 if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
535 priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
536
537 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
538 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
539
540 	netdev_dbg(ndev, "status: 0x%08x\n",
541 priv->read_reg(priv, XCAN_SR_OFFSET));
542
543 priv->can.state = CAN_STATE_ERROR_ACTIVE;
544 return 0;
545 }
546
547 /**
548 * xcan_do_set_mode - This sets the mode of the driver
549 * @ndev: Pointer to net_device structure
550 * @mode: Tells the mode of the driver
551 *
552  * This checks the requested mode and calls the corresponding routine to set it.
553 *
554 * Return: 0 on success and failure value on error
555 */
556 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
557 {
558 int ret;
559
560 switch (mode) {
561 case CAN_MODE_START:
562 ret = xcan_chip_start(ndev);
563 if (ret < 0) {
564 netdev_err(ndev, "xcan_chip_start failed!\n");
565 return ret;
566 }
567 netif_wake_queue(ndev);
568 break;
569 default:
570 ret = -EOPNOTSUPP;
571 break;
572 }
573
574 return ret;
575 }
576
577 /**
578 * xcan_write_frame - Write a frame to HW
579 * @ndev: Pointer to net_device structure
580 * @skb: sk_buff pointer that contains data to be Txed
581 * @frame_offset: Register offset to write the frame to
582 */
583 static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
584 int frame_offset)
585 {
586 u32 id, dlc, data[2] = {0, 0};
587 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
588 u32 ramoff, dwindex = 0, i;
589 struct xcan_priv *priv = netdev_priv(ndev);
590
591 /* Watch carefully on the bit sequence */
592 if (cf->can_id & CAN_EFF_FLAG) {
593 /* Extended CAN ID format */
594 id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
595 XCAN_IDR_ID2_MASK;
596 id |= (((cf->can_id & CAN_EFF_MASK) >>
597 (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
598 XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
599
600 		/* The substitute remote TX request bit should be "1"
601 * for extended frames as in the Xilinx CAN datasheet
602 */
603 id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
604
605 if (cf->can_id & CAN_RTR_FLAG)
606 /* Extended frames remote TX request */
607 id |= XCAN_IDR_RTR_MASK;
608 } else {
609 /* Standard CAN ID format */
610 id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
611 XCAN_IDR_ID1_MASK;
612
613 if (cf->can_id & CAN_RTR_FLAG)
614 /* Standard frames remote TX request */
615 id |= XCAN_IDR_SRR_MASK;
616 }
617
618 dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
619 if (can_is_canfd_skb(skb)) {
620 if (cf->flags & CANFD_BRS)
621 dlc |= XCAN_DLCR_BRS_MASK;
622 dlc |= XCAN_DLCR_EDL_MASK;
623 }
624
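	/* In FIFO mode with TXFEMP up to tx_max frames can be in flight, so
	 * the echo skb slot follows tx_head; mailbox mode and FIFOs without
	 * TXFEMP only ever have a single frame in flight and always use
	 * slot 0.
	 */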
625 if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
626 (priv->devtype.flags & XCAN_FLAG_TXFEMP))
627 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
628 else
629 can_put_echo_skb(skb, ndev, 0, 0);
630
631 priv->tx_head++;
632
633 priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
634 /* If the CAN frame is RTR frame this write triggers transmission
635 * (not on CAN FD)
636 */
637 priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
638 if (priv->devtype.cantype == XAXI_CANFD ||
639 priv->devtype.cantype == XAXI_CANFD_2_0) {
640 for (i = 0; i < cf->len; i += 4) {
641 ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
642 (dwindex * XCANFD_DW_BYTES);
643 priv->write_reg(priv, ramoff,
644 be32_to_cpup((__be32 *)(cf->data + i)));
645 dwindex++;
646 }
647 } else {
648 if (cf->len > 0)
649 data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
650 if (cf->len > 4)
651 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
652
653 if (!(cf->can_id & CAN_RTR_FLAG)) {
654 priv->write_reg(priv,
655 XCAN_FRAME_DW1_OFFSET(frame_offset),
656 data[0]);
657 /* If the CAN frame is Standard/Extended frame this
658 * write triggers transmission (not on CAN FD)
659 */
660 priv->write_reg(priv,
661 XCAN_FRAME_DW2_OFFSET(frame_offset),
662 data[1]);
663 }
664 }
665 }
666
667 /**
668 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
669 * @skb: sk_buff pointer that contains data to be Txed
670 * @ndev: Pointer to net_device structure
671 *
672 * Return: 0 on success, -ENOSPC if FIFO is full.
673 */
674 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
675 {
676 struct xcan_priv *priv = netdev_priv(ndev);
677 unsigned long flags;
678
679 /* Check if the TX buffer is full */
680 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
681 XCAN_SR_TXFLL_MASK))
682 return -ENOSPC;
683
684 spin_lock_irqsave(&priv->tx_lock, flags);
685
686 xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);
687
688 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
689 if (priv->tx_max > 1)
690 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
691
692 /* Check if the TX buffer is full */
693 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
694 netif_stop_queue(ndev);
695
696 spin_unlock_irqrestore(&priv->tx_lock, flags);
697
698 return 0;
699 }
700
701 /**
702 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
703 * @skb: sk_buff pointer that contains data to be Txed
704 * @ndev: Pointer to net_device structure
705 *
706 * Return: 0 on success, -ENOSPC if there is no space
707 */
708 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
709 {
710 struct xcan_priv *priv = netdev_priv(ndev);
711 unsigned long flags;
712
713 if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
714 BIT(XCAN_TX_MAILBOX_IDX)))
715 return -ENOSPC;
716
717 spin_lock_irqsave(&priv->tx_lock, flags);
718
719 xcan_write_frame(ndev, skb,
720 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
721
722 /* Mark buffer as ready for transmit */
723 priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
724
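	/* Only a single TX mailbox is used, so stop the queue until the TXOK
	 * interrupt releases it again.
	 */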
725 netif_stop_queue(ndev);
726
727 spin_unlock_irqrestore(&priv->tx_lock, flags);
728
729 return 0;
730 }
731
732 /**
733 * xcan_start_xmit - Starts the transmission
734 * @skb: sk_buff pointer that contains data to be Txed
735 * @ndev: Pointer to net_device structure
736 *
737 * This function is invoked from upper layers to initiate transmission.
738 *
739 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
740 */
741 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
742 {
743 struct xcan_priv *priv = netdev_priv(ndev);
744 int ret;
745
746 if (can_dev_dropped_skb(ndev, skb))
747 return NETDEV_TX_OK;
748
749 if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
750 ret = xcan_start_xmit_mailbox(skb, ndev);
751 else
752 ret = xcan_start_xmit_fifo(skb, ndev);
753
754 if (ret < 0) {
755 netdev_err(ndev, "BUG!, TX full when queue awake!\n");
756 netif_stop_queue(ndev);
757 return NETDEV_TX_BUSY;
758 }
759
760 return NETDEV_TX_OK;
761 }
762
763 /**
764 * xcan_rx - Is called from CAN isr to complete the received
765 * frame processing
766 * @ndev: Pointer to net_device structure
767 * @frame_base: Register offset to the frame to be read
768 *
769 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
770 * does minimal processing and invokes "netif_receive_skb" to complete further
771 * processing.
772 * Return: 1 on success and 0 on failure.
773 */
774 static int xcan_rx(struct net_device *ndev, int frame_base)
775 {
776 struct xcan_priv *priv = netdev_priv(ndev);
777 struct net_device_stats *stats = &ndev->stats;
778 struct can_frame *cf;
779 struct sk_buff *skb;
780 u32 id_xcan, dlc, data[2] = {0, 0};
781
782 skb = alloc_can_skb(ndev, &cf);
783 if (unlikely(!skb)) {
784 stats->rx_dropped++;
785 return 0;
786 }
787
788 /* Read a frame from Xilinx zynq CANPS */
789 id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
790 dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
791 XCAN_DLCR_DLC_SHIFT;
792
793 /* Change Xilinx CAN data length format to socketCAN data format */
794 cf->len = can_cc_dlc2len(dlc);
795
796 /* Change Xilinx CAN ID format to socketCAN ID format */
797 if (id_xcan & XCAN_IDR_IDE_MASK) {
798 /* The received frame is an Extended format frame */
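		/* ID1 (bits 31:21) carries bits 28:18 of the 29-bit
		 * identifier, hence the shift right by 3; ID2 supplies the
		 * remaining 18 low bits.
		 */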
799 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
800 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
801 XCAN_IDR_ID2_SHIFT;
802 cf->can_id |= CAN_EFF_FLAG;
803 if (id_xcan & XCAN_IDR_RTR_MASK)
804 cf->can_id |= CAN_RTR_FLAG;
805 } else {
806 /* The received frame is a standard format frame */
807 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
808 XCAN_IDR_ID1_SHIFT;
809 if (id_xcan & XCAN_IDR_SRR_MASK)
810 cf->can_id |= CAN_RTR_FLAG;
811 }
812
813 /* DW1/DW2 must always be read to remove message from RXFIFO */
814 data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
815 data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
816
817 if (!(cf->can_id & CAN_RTR_FLAG)) {
818 /* Change Xilinx CAN data format to socketCAN data format */
819 if (cf->len > 0)
820 *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
821 if (cf->len > 4)
822 *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
823
824 stats->rx_bytes += cf->len;
825 }
826 stats->rx_packets++;
827
828 netif_receive_skb(skb);
829
830 return 1;
831 }
832
833 /**
834 * xcanfd_rx - Is called from CAN isr to complete the received
835 * frame processing
836 * @ndev: Pointer to net_device structure
837 * @frame_base: Register offset to the frame to be read
838 *
839 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
840 * does minimal processing and invokes "netif_receive_skb" to complete further
841 * processing.
842 * Return: 1 on success and 0 on failure.
843 */
844 static int xcanfd_rx(struct net_device *ndev, int frame_base)
845 {
846 struct xcan_priv *priv = netdev_priv(ndev);
847 struct net_device_stats *stats = &ndev->stats;
848 struct canfd_frame *cf;
849 struct sk_buff *skb;
850 u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
851
852 id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
853 dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
854 if (dlc & XCAN_DLCR_EDL_MASK)
855 skb = alloc_canfd_skb(ndev, &cf);
856 else
857 skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
858
859 if (unlikely(!skb)) {
860 stats->rx_dropped++;
861 return 0;
862 }
863
864 /* Change Xilinx CANFD data length format to socketCAN data
865 * format
866 */
867 if (dlc & XCAN_DLCR_EDL_MASK)
868 cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
869 XCAN_DLCR_DLC_SHIFT);
870 else
871 cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
872 XCAN_DLCR_DLC_SHIFT);
873
874 /* Change Xilinx CAN ID format to socketCAN ID format */
875 if (id_xcan & XCAN_IDR_IDE_MASK) {
876 /* The received frame is an Extended format frame */
877 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
878 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
879 XCAN_IDR_ID2_SHIFT;
880 cf->can_id |= CAN_EFF_FLAG;
881 if (id_xcan & XCAN_IDR_RTR_MASK)
882 cf->can_id |= CAN_RTR_FLAG;
883 } else {
884 /* The received frame is a standard format frame */
885 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
886 XCAN_IDR_ID1_SHIFT;
887 if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
888 XCAN_IDR_SRR_MASK))
889 cf->can_id |= CAN_RTR_FLAG;
890 }
891
892 	/* Check whether the received frame is FD or not */
893 if (dlc & XCAN_DLCR_EDL_MASK) {
894 for (i = 0; i < cf->len; i += 4) {
895 dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
896 (dwindex * XCANFD_DW_BYTES);
897 data[0] = priv->read_reg(priv, dw_offset);
898 *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
899 dwindex++;
900 }
901 } else {
902 for (i = 0; i < cf->len; i += 4) {
903 dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
904 data[0] = priv->read_reg(priv, dw_offset + i);
905 *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
906 }
907 }
908
909 if (!(cf->can_id & CAN_RTR_FLAG))
910 stats->rx_bytes += cf->len;
911 stats->rx_packets++;
912
913 netif_receive_skb(skb);
914
915 return 1;
916 }
917
918 /**
919 * xcan_current_error_state - Get current error state from HW
920 * @ndev: Pointer to net_device structure
921 *
922 * Checks the current CAN error state from the HW. Note that this
923 * only checks for ERROR_PASSIVE and ERROR_WARNING.
924 *
925 * Return:
926 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
927 * otherwise.
928 */
929 static enum can_state xcan_current_error_state(struct net_device *ndev)
930 {
931 struct xcan_priv *priv = netdev_priv(ndev);
932 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
933
934 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
935 return CAN_STATE_ERROR_PASSIVE;
936 else if (status & XCAN_SR_ERRWRN_MASK)
937 return CAN_STATE_ERROR_WARNING;
938 else
939 return CAN_STATE_ERROR_ACTIVE;
940 }
941
942 /**
943 * xcan_set_error_state - Set new CAN error state
944 * @ndev: Pointer to net_device structure
945 * @new_state: The new CAN state to be set
946 * @cf: Error frame to be populated or NULL
947 *
948 * Set new CAN error state for the device, updating statistics and
949 * populating the error frame if given.
950 */
951 static void xcan_set_error_state(struct net_device *ndev,
952 enum can_state new_state,
953 struct can_frame *cf)
954 {
955 struct xcan_priv *priv = netdev_priv(ndev);
956 u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
957 u32 txerr = ecr & XCAN_ECR_TEC_MASK;
958 u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
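	/* Attribute the state change to whichever side has the higher error
	 * counter; can_change_state() applies the higher of tx_state/rx_state
	 * as the new device state, so the other side is left at 0
	 * (ERROR_ACTIVE).
	 */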
959 enum can_state tx_state = txerr >= rxerr ? new_state : 0;
960 enum can_state rx_state = txerr <= rxerr ? new_state : 0;
961
962 /* non-ERROR states are handled elsewhere */
963 if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
964 return;
965
966 can_change_state(ndev, cf, tx_state, rx_state);
967
968 if (cf) {
969 cf->can_id |= CAN_ERR_CNT;
970 cf->data[6] = txerr;
971 cf->data[7] = rxerr;
972 }
973 }
974
975 /**
976 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
977 * @ndev: Pointer to net_device structure
978 *
979  * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
980 * the performed RX/TX has caused it to drop to a lesser state and set
981 * the interface state accordingly.
982 */
983 static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
984 {
985 struct xcan_priv *priv = netdev_priv(ndev);
986 enum can_state old_state = priv->can.state;
987 enum can_state new_state;
988
989 /* changing error state due to successful frame RX/TX can only
990 * occur from these states
991 */
992 if (old_state != CAN_STATE_ERROR_WARNING &&
993 old_state != CAN_STATE_ERROR_PASSIVE)
994 return;
995
996 new_state = xcan_current_error_state(ndev);
997
998 if (new_state != old_state) {
999 struct sk_buff *skb;
1000 struct can_frame *cf;
1001
1002 skb = alloc_can_err_skb(ndev, &cf);
1003
1004 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
1005
1006 if (skb)
1007 netif_rx(skb);
1008 }
1009 }
1010
1011 /**
1012 * xcan_err_interrupt - error frame Isr
1013 * @ndev: net_device pointer
1014 * @isr: interrupt status register value
1015 *
1016 * This is the CAN error interrupt and it will
1017 * check the type of error and forward the error
1018 * frame to upper layers.
1019 */
1020 static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
1021 {
1022 struct xcan_priv *priv = netdev_priv(ndev);
1023 struct net_device_stats *stats = &ndev->stats;
1024 struct can_frame cf = { };
1025 u32 err_status;
1026
1027 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
1028 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
1029
1030 if (isr & XCAN_IXR_BSOFF_MASK) {
1031 priv->can.state = CAN_STATE_BUS_OFF;
1032 priv->can.can_stats.bus_off++;
1033 /* Leave device in Config Mode in bus-off state */
1034 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1035 can_bus_off(ndev);
1036 cf.can_id |= CAN_ERR_BUSOFF;
1037 } else {
1038 enum can_state new_state = xcan_current_error_state(ndev);
1039
1040 if (new_state != priv->can.state)
1041 xcan_set_error_state(ndev, new_state, &cf);
1042 }
1043
1044 /* Check for Arbitration lost interrupt */
1045 if (isr & XCAN_IXR_ARBLST_MASK) {
1046 priv->can.can_stats.arbitration_lost++;
1047 cf.can_id |= CAN_ERR_LOSTARB;
1048 cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
1049 }
1050
1051 /* Check for RX FIFO Overflow interrupt */
1052 if (isr & XCAN_IXR_RXOFLW_MASK) {
1053 stats->rx_over_errors++;
1054 stats->rx_errors++;
1055 cf.can_id |= CAN_ERR_CRTL;
1056 cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
1057 }
1058
1059 /* Check for RX Match Not Finished interrupt */
1060 if (isr & XCAN_IXR_RXMNF_MASK) {
1061 stats->rx_dropped++;
1062 stats->rx_errors++;
1063 netdev_err(ndev, "RX match not finished, frame discarded\n");
1064 cf.can_id |= CAN_ERR_CRTL;
1065 cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
1066 }
1067
1068 /* Check for error interrupt */
1069 if (isr & XCAN_IXR_ERROR_MASK) {
1070 bool berr_reporting = false;
1071
1072 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
1073 berr_reporting = true;
1074 cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1075 }
1076
1077 /* Check for Ack error interrupt */
1078 if (err_status & XCAN_ESR_ACKER_MASK) {
1079 stats->tx_errors++;
1080 if (berr_reporting) {
1081 cf.can_id |= CAN_ERR_ACK;
1082 cf.data[3] = CAN_ERR_PROT_LOC_ACK;
1083 }
1084 }
1085
1086 /* Check for Bit error interrupt */
1087 if (err_status & XCAN_ESR_BERR_MASK) {
1088 stats->tx_errors++;
1089 if (berr_reporting) {
1090 cf.can_id |= CAN_ERR_PROT;
1091 cf.data[2] = CAN_ERR_PROT_BIT;
1092 }
1093 }
1094
1095 /* Check for Stuff error interrupt */
1096 if (err_status & XCAN_ESR_STER_MASK) {
1097 stats->rx_errors++;
1098 if (berr_reporting) {
1099 cf.can_id |= CAN_ERR_PROT;
1100 cf.data[2] = CAN_ERR_PROT_STUFF;
1101 }
1102 }
1103
1104 /* Check for Form error interrupt */
1105 if (err_status & XCAN_ESR_FMER_MASK) {
1106 stats->rx_errors++;
1107 if (berr_reporting) {
1108 cf.can_id |= CAN_ERR_PROT;
1109 cf.data[2] = CAN_ERR_PROT_FORM;
1110 }
1111 }
1112
1113 /* Check for CRC error interrupt */
1114 if (err_status & XCAN_ESR_CRCER_MASK) {
1115 stats->rx_errors++;
1116 if (berr_reporting) {
1117 cf.can_id |= CAN_ERR_PROT;
1118 cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
1119 }
1120 }
1121 priv->can.can_stats.bus_error++;
1122 }
1123
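	/* The error frame was assembled in the on-stack cf so that the
	 * statistics above are updated even if skb allocation fails; report
	 * it to the stack only if some error flag was actually set.
	 */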
1124 if (cf.can_id) {
1125 struct can_frame *skb_cf;
1126 struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
1127
1128 if (skb) {
1129 skb_cf->can_id |= cf.can_id;
1130 memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
1131 netif_rx(skb);
1132 }
1133 }
1134
1135 netdev_dbg(ndev, "%s: error status register:0x%x\n",
1136 __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
1137 }
1138
1139 /**
1140 * xcan_state_interrupt - It will check the state of the CAN device
1141 * @ndev: net_device pointer
1142 * @isr: interrupt status register value
1143 *
1144  * This checks the state of the CAN device
1145  * and puts the device into the appropriate state.
1146 */
1147 static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
1148 {
1149 struct xcan_priv *priv = netdev_priv(ndev);
1150
1151 /* Check for Sleep interrupt if set put CAN device in sleep state */
1152 if (isr & XCAN_IXR_SLP_MASK)
1153 priv->can.state = CAN_STATE_SLEEPING;
1154
1155 /* Check for Wake up interrupt if set put CAN device in Active state */
1156 if (isr & XCAN_IXR_WKUP_MASK)
1157 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1158 }
1159
1160 /**
1161 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
1162 * @priv: Driver private data structure
1163 *
1164 * Return: Register offset of the next frame in RX FIFO.
1165 */
1166 static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
1167 {
1168 int offset;
1169
1170 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
1171 u32 fsr, mask;
1172
1173 /* clear RXOK before the is-empty check so that any newly
1174 * received frame will reassert it without a race
1175 */
1176 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
1177
1178 fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
1179
1180 /* check if RX FIFO is empty */
1181 if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1182 mask = XCAN_2_FSR_FL_MASK;
1183 else
1184 mask = XCAN_FSR_FL_MASK;
1185
1186 if (!(fsr & mask))
1187 return -ENOENT;
1188
1189 if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1190 offset =
1191 XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
1192 else
1193 offset =
1194 XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
1195
1196 } else {
1197 /* check if RX FIFO is empty */
1198 if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
1199 XCAN_IXR_RXNEMP_MASK))
1200 return -ENOENT;
1201
1202 /* frames are read from a static offset */
1203 offset = XCAN_RXFIFO_OFFSET;
1204 }
1205
1206 return offset;
1207 }
1208
1209 /**
1210 * xcan_rx_poll - Poll routine for rx packets (NAPI)
1211 * @napi: napi structure pointer
1212 * @quota: Max number of rx packets to be processed.
1213 *
1214  * This is the poll routine for the RX path.
1215  * It will process up to the quota number of packets.
1216 *
1217 * Return: number of packets received
1218 */
1219 static int xcan_rx_poll(struct napi_struct *napi, int quota)
1220 {
1221 struct net_device *ndev = napi->dev;
1222 struct xcan_priv *priv = netdev_priv(ndev);
1223 u32 ier;
1224 int work_done = 0;
1225 int frame_offset;
1226
1227 while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
1228 (work_done < quota)) {
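		/* RXOK is used as the RX interrupt only on cores with the
		 * multi-buffer RX FIFO (CAN FD), so it also selects the
		 * CAN FD frame layout reader here.
		 */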
1229 if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
1230 work_done += xcanfd_rx(ndev, frame_offset);
1231 else
1232 work_done += xcan_rx(ndev, frame_offset);
1233
1234 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
1235 /* increment read index */
1236 priv->write_reg(priv, XCAN_FSR_OFFSET,
1237 XCAN_FSR_IRI_MASK);
1238 else
1239 /* clear rx-not-empty (will actually clear only if
1240 * empty)
1241 */
1242 priv->write_reg(priv, XCAN_ICR_OFFSET,
1243 XCAN_IXR_RXNEMP_MASK);
1244 }
1245
1246 if (work_done)
1247 xcan_update_error_state_after_rxtx(ndev);
1248
1249 if (work_done < quota) {
1250 if (napi_complete_done(napi, work_done)) {
1251 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1252 ier |= xcan_rx_int_mask(priv);
1253 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1254 }
1255 }
1256 return work_done;
1257 }
1258
1259 /**
1260 * xcan_tx_interrupt - Tx Done Isr
1261 * @ndev: net_device pointer
1262 * @isr: Interrupt status register value
1263 */
1264 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1265 {
1266 struct xcan_priv *priv = netdev_priv(ndev);
1267 struct net_device_stats *stats = &ndev->stats;
1268 unsigned int frames_in_fifo;
1269 int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1270 unsigned long flags;
1271 int retries = 0;
1272
1273 /* Synchronize with xmit as we need to know the exact number
1274 * of frames in the FIFO to stay in sync due to the TXFEMP
1275 * handling.
1276 * This also prevents a race between netif_wake_queue() and
1277 * netif_stop_queue().
1278 */
1279 spin_lock_irqsave(&priv->tx_lock, flags);
1280
1281 frames_in_fifo = priv->tx_head - priv->tx_tail;
1282
1283 if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1284 /* clear TXOK anyway to avoid getting back here */
1285 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1286 spin_unlock_irqrestore(&priv->tx_lock, flags);
1287 return;
1288 }
1289
1290 /* Check if 2 frames were sent (TXOK only means that at least 1
1291 * frame was sent).
1292 */
1293 if (frames_in_fifo > 1) {
1294 WARN_ON(frames_in_fifo > priv->tx_max);
1295
1296 /* Synchronize TXOK and isr so that after the loop:
1297 * (1) isr variable is up-to-date at least up to TXOK clear
1298 * time. This avoids us clearing a TXOK of a second frame
1299 * but not noticing that the FIFO is now empty and thus
1300 * marking only a single frame as sent.
1301 * (2) No TXOK is left. Having one could mean leaving a
1302 * stray TXOK as we might process the associated frame
1303 * via TXFEMP handling as we read TXFEMP *after* TXOK
1304 * clear to satisfy (1).
1305 */
1306 while ((isr & XCAN_IXR_TXOK_MASK) &&
1307 !WARN_ON(++retries == 100)) {
1308 priv->write_reg(priv, XCAN_ICR_OFFSET,
1309 XCAN_IXR_TXOK_MASK);
1310 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1311 }
1312
1313 if (isr & XCAN_IXR_TXFEMP_MASK) {
1314 /* nothing in FIFO anymore */
1315 frames_sent = frames_in_fifo;
1316 }
1317 } else {
1318 /* single frame in fifo, just clear TXOK */
1319 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1320 }
1321
1322 while (frames_sent--) {
1323 stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1324 priv->tx_max, NULL);
1325 priv->tx_tail++;
1326 stats->tx_packets++;
1327 }
1328
1329 netif_wake_queue(ndev);
1330
1331 spin_unlock_irqrestore(&priv->tx_lock, flags);
1332
1333 xcan_update_error_state_after_rxtx(ndev);
1334 }
1335
1336 /**
1337 * xcan_interrupt - CAN Isr
1338 * @irq: irq number
1339 * @dev_id: device id pointer
1340 *
1341  * This is the Xilinx CAN ISR. It checks for the type of interrupt
1342 * and invokes the corresponding ISR.
1343 *
1344 * Return:
1345 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
1346 */
1347 static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1348 {
1349 struct net_device *ndev = (struct net_device *)dev_id;
1350 struct xcan_priv *priv = netdev_priv(ndev);
1351 u32 isr, ier;
1352 u32 isr_errors;
1353 u32 rx_int_mask = xcan_rx_int_mask(priv);
1354
1355 /* Get the interrupt status from Xilinx CAN */
1356 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1357 if (!isr)
1358 return IRQ_NONE;
1359
1360 	/* Check for the type of interrupt and process it */
1361 if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
1362 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
1363 XCAN_IXR_WKUP_MASK));
1364 xcan_state_interrupt(ndev, isr);
1365 }
1366
1367 	/* Check for Tx interrupt and process it */
1368 if (isr & XCAN_IXR_TXOK_MASK)
1369 xcan_tx_interrupt(ndev, isr);
1370
1371 	/* Check for the type of error interrupt and process it */
1372 isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1373 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
1374 XCAN_IXR_RXMNF_MASK);
1375 if (isr_errors) {
1376 priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1377 xcan_err_interrupt(ndev, isr);
1378 }
1379
1380 	/* Check for the type of receive interrupt and process it */
1381 if (isr & rx_int_mask) {
1382 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1383 ier &= ~rx_int_mask;
1384 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1385 napi_schedule(&priv->napi);
1386 }
1387 return IRQ_HANDLED;
1388 }
1389
1390 /**
1391 * xcan_chip_stop - Driver stop routine
1392 * @ndev: Pointer to net_device structure
1393 *
1394 * This is the drivers stop routine. It will disable the
1395 * interrupts and put the device into configuration mode.
1396 */
1397 static void xcan_chip_stop(struct net_device *ndev)
1398 {
1399 struct xcan_priv *priv = netdev_priv(ndev);
1400 int ret;
1401
1402 /* Disable interrupts and leave the can in configuration mode */
1403 ret = set_reset_mode(ndev);
1404 if (ret < 0)
1405 netdev_dbg(ndev, "set_reset_mode() Failed\n");
1406
1407 priv->can.state = CAN_STATE_STOPPED;
1408 }
1409
1410 /**
1411 * xcan_open - Driver open routine
1412 * @ndev: Pointer to net_device structure
1413 *
1414 * This is the driver open routine.
1415 * Return: 0 on success and failure value on error
1416 */
1417 static int xcan_open(struct net_device *ndev)
1418 {
1419 struct xcan_priv *priv = netdev_priv(ndev);
1420 int ret;
1421
1422 ret = pm_runtime_get_sync(priv->dev);
1423 if (ret < 0) {
1424 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1425 __func__, ret);
1426 goto err;
1427 }
1428
1429 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1430 ndev->name, ndev);
1431 if (ret < 0) {
1432 netdev_err(ndev, "irq allocation for CAN failed\n");
1433 goto err;
1434 }
1435
1436 /* Set chip into reset mode */
1437 ret = set_reset_mode(ndev);
1438 if (ret < 0) {
1439 netdev_err(ndev, "mode resetting failed!\n");
1440 goto err_irq;
1441 }
1442
1443 /* Common open */
1444 ret = open_candev(ndev);
1445 if (ret)
1446 goto err_irq;
1447
1448 ret = xcan_chip_start(ndev);
1449 if (ret < 0) {
1450 netdev_err(ndev, "xcan_chip_start failed!\n");
1451 goto err_candev;
1452 }
1453
1454 napi_enable(&priv->napi);
1455 netif_start_queue(ndev);
1456
1457 return 0;
1458
1459 err_candev:
1460 close_candev(ndev);
1461 err_irq:
1462 free_irq(ndev->irq, ndev);
1463 err:
1464 pm_runtime_put(priv->dev);
1465
1466 return ret;
1467 }
1468
1469 /**
1470 * xcan_close - Driver close routine
1471 * @ndev: Pointer to net_device structure
1472 *
1473 * Return: 0 always
1474 */
1475 static int xcan_close(struct net_device *ndev)
1476 {
1477 struct xcan_priv *priv = netdev_priv(ndev);
1478
1479 netif_stop_queue(ndev);
1480 napi_disable(&priv->napi);
1481 xcan_chip_stop(ndev);
1482 free_irq(ndev->irq, ndev);
1483 close_candev(ndev);
1484
1485 pm_runtime_put(priv->dev);
1486
1487 return 0;
1488 }
1489
1490 /**
1491 * xcan_get_berr_counter - error counter routine
1492 * @ndev: Pointer to net_device structure
1493 * @bec: Pointer to can_berr_counter structure
1494 *
1495 * This is the driver error counter routine.
1496 * Return: 0 on success and failure value on error
1497 */
1498 static int xcan_get_berr_counter(const struct net_device *ndev,
1499 struct can_berr_counter *bec)
1500 {
1501 struct xcan_priv *priv = netdev_priv(ndev);
1502 int ret;
1503
1504 ret = pm_runtime_get_sync(priv->dev);
1505 if (ret < 0) {
1506 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1507 __func__, ret);
1508 pm_runtime_put(priv->dev);
1509 return ret;
1510 }
1511
1512 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1513 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1514 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1515
1516 pm_runtime_put(priv->dev);
1517
1518 return 0;
1519 }
1520
1521 /**
1522 * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
1523 * @ndev: Pointer to net_device structure
1524 * @tdcv: Pointer to TDCV value
1525 *
1526 * Return: 0 on success
1527 */
1528 static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
1529 {
1530 struct xcan_priv *priv = netdev_priv(ndev);
1531
1532 *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
1533
1534 return 0;
1535 }
1536
1537 static const struct net_device_ops xcan_netdev_ops = {
1538 .ndo_open = xcan_open,
1539 .ndo_stop = xcan_close,
1540 .ndo_start_xmit = xcan_start_xmit,
1541 .ndo_change_mtu = can_change_mtu,
1542 };
1543
1544 static const struct ethtool_ops xcan_ethtool_ops = {
1545 .get_ts_info = ethtool_op_get_ts_info,
1546 };
1547
1548 /**
1549 * xcan_suspend - Suspend method for the driver
1550 * @dev: Address of the device structure
1551 *
1552 * Put the driver into low power mode.
1553 * Return: 0 on success and failure value on error
1554 */
1555 static int __maybe_unused xcan_suspend(struct device *dev)
1556 {
1557 struct net_device *ndev = dev_get_drvdata(dev);
1558
1559 if (netif_running(ndev)) {
1560 netif_stop_queue(ndev);
1561 netif_device_detach(ndev);
1562 xcan_chip_stop(ndev);
1563 }
1564
1565 return pm_runtime_force_suspend(dev);
1566 }
1567
1568 /**
1569 * xcan_resume - Resume from suspend
1570 * @dev: Address of the device structure
1571 *
1572 * Resume operation after suspend.
1573 * Return: 0 on success and failure value on error
1574 */
1575 static int __maybe_unused xcan_resume(struct device *dev)
1576 {
1577 struct net_device *ndev = dev_get_drvdata(dev);
1578 int ret;
1579
1580 ret = pm_runtime_force_resume(dev);
1581 if (ret) {
1582 dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1583 return ret;
1584 }
1585
1586 if (netif_running(ndev)) {
1587 ret = xcan_chip_start(ndev);
1588 if (ret) {
1589 dev_err(dev, "xcan_chip_start failed on resume\n");
1590 return ret;
1591 }
1592
1593 netif_device_attach(ndev);
1594 netif_start_queue(ndev);
1595 }
1596
1597 return 0;
1598 }
1599
1600 /**
1601 * xcan_runtime_suspend - Runtime suspend method for the driver
1602 * @dev: Address of the device structure
1603 *
1604 * Put the driver into low power mode.
1605 * Return: 0 always
1606 */
1607 static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1608 {
1609 struct net_device *ndev = dev_get_drvdata(dev);
1610 struct xcan_priv *priv = netdev_priv(ndev);
1611
1612 clk_disable_unprepare(priv->bus_clk);
1613 clk_disable_unprepare(priv->can_clk);
1614
1615 return 0;
1616 }
1617
1618 /**
1619 * xcan_runtime_resume - Runtime resume from suspend
1620 * @dev: Address of the device structure
1621 *
1622 * Resume operation after suspend.
1623 * Return: 0 on success and failure value on error
1624 */
1625 static int __maybe_unused xcan_runtime_resume(struct device *dev)
1626 {
1627 struct net_device *ndev = dev_get_drvdata(dev);
1628 struct xcan_priv *priv = netdev_priv(ndev);
1629 int ret;
1630
1631 ret = clk_prepare_enable(priv->bus_clk);
1632 if (ret) {
1633 dev_err(dev, "Cannot enable clock.\n");
1634 return ret;
1635 }
1636 ret = clk_prepare_enable(priv->can_clk);
1637 if (ret) {
1638 dev_err(dev, "Cannot enable clock.\n");
1639 clk_disable_unprepare(priv->bus_clk);
1640 return ret;
1641 }
1642
1643 return 0;
1644 }
1645
1646 static const struct dev_pm_ops xcan_dev_pm_ops = {
1647 SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
1648 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1649 };
1650
1651 static const struct xcan_devtype_data xcan_zynq_data = {
1652 .cantype = XZYNQ_CANPS,
1653 .flags = XCAN_FLAG_TXFEMP,
1654 .bittiming_const = &xcan_bittiming_const,
1655 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1656 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1657 .bus_clk_name = "pclk",
1658 };
1659
1660 static const struct xcan_devtype_data xcan_axi_data = {
1661 .cantype = XAXI_CAN,
1662 .bittiming_const = &xcan_bittiming_const,
1663 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1664 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1665 .bus_clk_name = "s_axi_aclk",
1666 };
1667
1668 static const struct xcan_devtype_data xcan_canfd_data = {
1669 .cantype = XAXI_CANFD,
1670 .flags = XCAN_FLAG_EXT_FILTERS |
1671 XCAN_FLAG_RXMNF |
1672 XCAN_FLAG_TX_MAILBOXES |
1673 XCAN_FLAG_RX_FIFO_MULTI,
1674 .bittiming_const = &xcan_bittiming_const_canfd,
1675 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1676 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1677 .bus_clk_name = "s_axi_aclk",
1678 };
1679
1680 static const struct xcan_devtype_data xcan_canfd2_data = {
1681 .cantype = XAXI_CANFD_2_0,
1682 .flags = XCAN_FLAG_EXT_FILTERS |
1683 XCAN_FLAG_RXMNF |
1684 XCAN_FLAG_TX_MAILBOXES |
1685 XCAN_FLAG_CANFD_2 |
1686 XCAN_FLAG_RX_FIFO_MULTI,
1687 .bittiming_const = &xcan_bittiming_const_canfd2,
1688 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1689 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1690 .bus_clk_name = "s_axi_aclk",
1691 };
1692
1693 /* Match table for OF platform binding */
1694 static const struct of_device_id xcan_of_match[] = {
1695 { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1696 { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
1697 { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
1698 { .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
1699 { /* end of list */ },
1700 };
1701 MODULE_DEVICE_TABLE(of, xcan_of_match);
1702
1703 /**
1704 * xcan_probe - Platform registration call
1705 * @pdev: Handle to the platform device structure
1706 *
1707 * This function does all the memory allocation and registration for the CAN
1708 * device.
1709 *
1710 * Return: 0 on success and failure value on error
1711 */
1712 static int xcan_probe(struct platform_device *pdev)
1713 {
1714 struct net_device *ndev;
1715 struct xcan_priv *priv;
1716 const struct of_device_id *of_id;
1717 const struct xcan_devtype_data *devtype = &xcan_axi_data;
1718 void __iomem *addr;
1719 int ret;
1720 int rx_max, tx_max;
1721 u32 hw_tx_max = 0, hw_rx_max = 0;
1722 const char *hw_tx_max_property;
1723
1724 /* Get the virtual base address for the device */
1725 addr = devm_platform_ioremap_resource(pdev, 0);
1726 if (IS_ERR(addr)) {
1727 ret = PTR_ERR(addr);
1728 goto err;
1729 }
1730
1731 of_id = of_match_device(xcan_of_match, &pdev->dev);
1732 if (of_id && of_id->data)
1733 devtype = of_id->data;
1734
1735 hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1736 "tx-mailbox-count" : "tx-fifo-depth";
1737
1738 ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1739 &hw_tx_max);
1740 if (ret < 0) {
1741 dev_err(&pdev->dev, "missing %s property\n",
1742 hw_tx_max_property);
1743 goto err;
1744 }
1745
1746 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1747 &hw_rx_max);
1748 if (ret < 0) {
1749 dev_err(&pdev->dev,
1750 "missing rx-fifo-depth property (mailbox mode is not supported)\n");
1751 goto err;
1752 }
1753
1754 /* With TX FIFO:
1755 *
1756 * There is no way to directly figure out how many frames have been
1757 * sent when the TXOK interrupt is processed. If TXFEMP
1758 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1759 * to determine if 1 or 2 frames have been sent.
1760 * Theoretically we should be able to use TXFWMEMP to determine up
1761 * to 3 frames, but it seems that after putting a second frame in the
1762 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1763 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1764 * sent), which is not a sensible state - possibly TXFWMEMP is not
1765 * completely synchronized with the rest of the bits?
1766 *
1767 * With TX mailboxes:
1768 *
1769 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1770 * we submit frames one at a time.
1771 */
1772 if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1773 (devtype->flags & XCAN_FLAG_TXFEMP))
1774 tx_max = min(hw_tx_max, 2U);
1775 else
1776 tx_max = 1;
1777
1778 rx_max = hw_rx_max;
1779
1780 /* Create a CAN device instance */
1781 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1782 if (!ndev)
1783 return -ENOMEM;
1784
1785 priv = netdev_priv(ndev);
1786 priv->dev = &pdev->dev;
1787 priv->can.bittiming_const = devtype->bittiming_const;
1788 priv->can.do_set_mode = xcan_do_set_mode;
1789 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1790 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1791 CAN_CTRLMODE_BERR_REPORTING;
1792
1793 if (devtype->cantype == XAXI_CANFD) {
1794 priv->can.data_bittiming_const =
1795 &xcan_data_bittiming_const_canfd;
1796 priv->can.tdc_const = &xcan_tdc_const_canfd;
1797 }
1798
1799 if (devtype->cantype == XAXI_CANFD_2_0) {
1800 priv->can.data_bittiming_const =
1801 &xcan_data_bittiming_const_canfd2;
1802 priv->can.tdc_const = &xcan_tdc_const_canfd2;
1803 }
1804
1805 if (devtype->cantype == XAXI_CANFD ||
1806 devtype->cantype == XAXI_CANFD_2_0) {
1807 priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
1808 CAN_CTRLMODE_TDC_AUTO;
1809 priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
1810 }
1811
1812 priv->reg_base = addr;
1813 priv->tx_max = tx_max;
1814 priv->devtype = *devtype;
1815 spin_lock_init(&priv->tx_lock);
1816
1817 /* Get IRQ for the device */
1818 ret = platform_get_irq(pdev, 0);
1819 if (ret < 0)
1820 goto err_free;
1821
1822 ndev->irq = ret;
1823
1824 ndev->flags |= IFF_ECHO; /* We support local echo */
1825
1826 platform_set_drvdata(pdev, ndev);
1827 SET_NETDEV_DEV(ndev, &pdev->dev);
1828 ndev->netdev_ops = &xcan_netdev_ops;
1829 ndev->ethtool_ops = &xcan_ethtool_ops;
1830
1831 /* Getting the CAN can_clk info */
1832 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1833 if (IS_ERR(priv->can_clk)) {
1834 ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
1835 "device clock not found\n");
1836 goto err_free;
1837 }
1838
1839 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1840 if (IS_ERR(priv->bus_clk)) {
1841 ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
1842 "bus clock not found\n");
1843 goto err_free;
1844 }
1845
1846 priv->write_reg = xcan_write_reg_le;
1847 priv->read_reg = xcan_read_reg_le;
1848
1849 pm_runtime_enable(&pdev->dev);
1850 ret = pm_runtime_get_sync(&pdev->dev);
1851 if (ret < 0) {
1852 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1853 __func__, ret);
1854 goto err_disableclks;
1855 }
1856
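	/* Detect register endianness: the core powers up in Configuration
	 * mode, so a little-endian read of SR should return exactly the
	 * CONFIG bit (0x1); any other value means the core is wired
	 * big-endian.
	 */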
1857 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1858 priv->write_reg = xcan_write_reg_be;
1859 priv->read_reg = xcan_read_reg_be;
1860 }
1861
1862 priv->can.clock.freq = clk_get_rate(priv->can_clk);
1863
1864 netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);
1865
1866 ret = register_candev(ndev);
1867 if (ret) {
1868 		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
1869 goto err_disableclks;
1870 }
1871
1872 pm_runtime_put(&pdev->dev);
1873
1874 if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
1875 priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
1876 priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
1877 }
1878
1879 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1880 priv->reg_base, ndev->irq, priv->can.clock.freq,
1881 hw_tx_max, priv->tx_max);
1882
1883 return 0;
1884
1885 err_disableclks:
1886 pm_runtime_put(priv->dev);
1887 pm_runtime_disable(&pdev->dev);
1888 err_free:
1889 free_candev(ndev);
1890 err:
1891 return ret;
1892 }
1893
1894 /**
1895 * xcan_remove - Unregister the device after releasing the resources
1896 * @pdev: Handle to the platform device structure
1897 *
1898 * This function frees all the resources allocated to the device.
1899 * Return: 0 always
1900 */
1901 static int xcan_remove(struct platform_device *pdev)
1902 {
1903 struct net_device *ndev = platform_get_drvdata(pdev);
1904
1905 unregister_candev(ndev);
1906 pm_runtime_disable(&pdev->dev);
1907 free_candev(ndev);
1908
1909 return 0;
1910 }
1911
1912 static struct platform_driver xcan_driver = {
1913 .probe = xcan_probe,
1914 .remove = xcan_remove,
1915 .driver = {
1916 .name = DRIVER_NAME,
1917 .pm = &xcan_dev_pm_ops,
1918 .of_match_table = xcan_of_match,
1919 },
1920 };
1921
1922 module_platform_driver(xcan_driver);
1923
1924 MODULE_LICENSE("GPL");
1925 MODULE_AUTHOR("Xilinx Inc");
1926 MODULE_DESCRIPTION("Xilinx CAN interface");
1927