1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Xilinx CAN device driver
3  *
4  * Copyright (C) 2012 - 2022 Xilinx, Inc.
5  * Copyright (C) 2009 PetaLogix. All rights reserved.
6  * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
7  *
8  * Description:
9  * This driver is developed for the AXI CAN IP and the Zynq CANPS controller.
10  */
11 
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/errno.h>
15 #include <linux/ethtool.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/of.h>
23 #include <linux/of_device.h>
24 #include <linux/platform_device.h>
25 #include <linux/skbuff.h>
26 #include <linux/spinlock.h>
27 #include <linux/string.h>
28 #include <linux/types.h>
29 #include <linux/can/dev.h>
30 #include <linux/can/error.h>
31 #include <linux/phy/phy.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/reset.h>
34 
35 #define DRIVER_NAME	"xilinx_can"
36 
37 /* CAN registers set */
38 enum xcan_reg {
39 	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
40 	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
41 	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
42 	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
43 	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
44 	XCAN_ESR_OFFSET		= 0x14, /* Error status */
45 	XCAN_SR_OFFSET		= 0x18, /* Status */
46 	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
47 	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
48 	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */
49 
50 	/* not on CAN FD cores */
51 	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
52 	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
53 	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */
54 
55 	/* only on CAN FD cores */
56 	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
57 					  * Prescaler
58 					  */
59 	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
60 	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
61 	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
62 	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
63 	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
64 	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
65 	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
66 	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
67 	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
68 };
69 
70 #define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
71 #define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
72 #define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
73 #define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
74 #define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)
75 
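/* Each CAN FD message buffer is 0x48 (72) bytes: a 4-byte ID word at +0x00,
 * a 4-byte DLC word at +0x04, followed by up to 64 data bytes from +0x08.
 */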
76 #define XCAN_CANFD_FRAME_SIZE		0x48
77 #define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
78 					 XCAN_CANFD_FRAME_SIZE * (n))
79 #define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
80 					 XCAN_CANFD_FRAME_SIZE * (n))
81 #define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
82 					 XCAN_CANFD_FRAME_SIZE * (n))
83 
84 /* the single TX mailbox used by this driver on CAN FD HW */
85 #define XCAN_TX_MAILBOX_IDX		0
86 
87 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
88 #define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
89 #define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
90 #define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
91 #define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
92 #define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
93 #define XCAN_BRPR_TDCO_MASK		GENMASK(12, 8)  /* TDCO */
94 #define XCAN_2_BRPR_TDCO_MASK		GENMASK(13, 8)  /* TDCO for CANFD 2.0 */
95 #define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
96 #define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
97 #define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
98 #define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
99 #define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
100 #define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
101 #define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
102 #define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
103 #define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
104 #define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
105 #define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
106 #define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
107 #define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
108 #define XCAN_SR_TDCV_MASK		GENMASK(22, 16) /* TDCV Value */
109 #define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
110 #define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
111 #define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
112 #define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
113 #define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
114 #define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
115 #define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
116 #define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
117 #define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
118 #define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
119 #define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
120 #define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
121 #define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
122 #define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
123 #define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
124 #define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
125 #define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
126 #define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
127 #define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
128 #define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
129 #define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
130 #define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
131 #define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
132 #define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
133 #define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
134 #define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
135 #define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
136 #define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
137 #define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
138 #define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
139 #define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */
140 
141 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
142 #define XCAN_BRPR_TDC_ENABLE		BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
143 #define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
144 #define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
145 #define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
146 #define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
147 #define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
148 #define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
149 #define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
150 #define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */
151 
152 /* CAN frame length constants */
153 #define XCAN_FRAME_MAX_DATA_LEN		8
154 #define XCANFD_DW_BYTES			4
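/* Mode change poll timeout: 1 * HZ jiffies, i.e. one second */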
155 #define XCAN_TIMEOUT			(1 * HZ)
156 
157 /* TX-FIFO-empty interrupt available */
158 #define XCAN_FLAG_TXFEMP	0x0001
159 /* RX Match Not Finished interrupt available */
160 #define XCAN_FLAG_RXMNF		0x0002
161 /* Extended acceptance filters with control at 0xE0 */
162 #define XCAN_FLAG_EXT_FILTERS	0x0004
163 /* TX mailboxes instead of TX FIFO */
164 #define XCAN_FLAG_TX_MAILBOXES	0x0008
165 /* RX FIFO with each buffer in separate registers at 0x1100
166  * instead of the regular FIFO at 0x50
167  */
168 #define XCAN_FLAG_RX_FIFO_MULTI	0x0010
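/* CAN FD 2.0 core: uses the wider FSR fill level / read index fields */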
169 #define XCAN_FLAG_CANFD_2	0x0020
170 
171 enum xcan_ip_type {
172 	XAXI_CAN = 0,
173 	XZYNQ_CANPS,
174 	XAXI_CANFD,
175 	XAXI_CANFD_2_0,
176 };
177 
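/**
 * struct xcan_devtype_data - Device type specific constants
 * @cantype:		Type of the CAN controller core
 * @flags:		XCAN_FLAG_* feature bits supported by this core
 * @bittiming_const:	Nominal bit timing limits of this core
 * @bus_clk_name:	Clock name of the bus/register interface clock
 * @btr_ts2_shift:	Position of Time Segment 2 in the BTR register
 * @btr_sjw_shift:	Position of the sync jump width in the BTR register
 */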
178 struct xcan_devtype_data {
179 	enum xcan_ip_type cantype;
180 	unsigned int flags;
181 	const struct can_bittiming_const *bittiming_const;
182 	const char *bus_clk_name;
183 	unsigned int btr_ts2_shift;
184 	unsigned int btr_sjw_shift;
185 };
186 
187 /**
188  * struct xcan_priv - This definition defines the CAN driver instance
189  * @can:			CAN private data structure.
190  * @tx_lock:			Lock for synchronizing TX interrupt handling
191  * @tx_head:			Tx CAN packets ready to send on the queue
192  * @tx_tail:			Tx CAN packets successfully sent on the queue
193  * @tx_max:			Maximum number of packets the driver can send
194  * @napi:			NAPI structure
195  * @read_reg:			For reading data from CAN registers
196  * @write_reg:			For writing data to CAN registers
197  * @dev:			Network device data structure
198  * @reg_base:			Ioremapped address to registers
199  * @irq_flags:			For request_irq()
200  * @bus_clk:			Pointer to struct clk
201  * @can_clk:			Pointer to struct clk
202  * @devtype:			Device type specific constants
203  * @transceiver:		Optional pointer to associated CAN transceiver
204  * @rstc:			Pointer to reset control
205  */
206 struct xcan_priv {
207 	struct can_priv can;
208 	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
209 	unsigned int tx_head;
210 	unsigned int tx_tail;
211 	unsigned int tx_max;
212 	struct napi_struct napi;
213 	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
214 	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
215 			  u32 val);
216 	struct device *dev;
217 	void __iomem *reg_base;
218 	unsigned long irq_flags;
219 	struct clk *bus_clk;
220 	struct clk *can_clk;
221 	struct xcan_devtype_data devtype;
222 	struct phy *transceiver;
223 	struct reset_control *rstc;
224 };
225 
226 /* CAN Bittiming constants as per Xilinx CAN specs */
227 static const struct can_bittiming_const xcan_bittiming_const = {
228 	.name = DRIVER_NAME,
229 	.tseg1_min = 1,
230 	.tseg1_max = 16,
231 	.tseg2_min = 1,
232 	.tseg2_max = 8,
233 	.sjw_max = 4,
234 	.brp_min = 1,
235 	.brp_max = 256,
236 	.brp_inc = 1,
237 };
238 
239 /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
240 static const struct can_bittiming_const xcan_bittiming_const_canfd = {
241 	.name = DRIVER_NAME,
242 	.tseg1_min = 1,
243 	.tseg1_max = 64,
244 	.tseg2_min = 1,
245 	.tseg2_max = 16,
246 	.sjw_max = 16,
247 	.brp_min = 1,
248 	.brp_max = 256,
249 	.brp_inc = 1,
250 };
251 
252 /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
253 static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
254 	.name = DRIVER_NAME,
255 	.tseg1_min = 1,
256 	.tseg1_max = 16,
257 	.tseg2_min = 1,
258 	.tseg2_max = 8,
259 	.sjw_max = 8,
260 	.brp_min = 1,
261 	.brp_max = 256,
262 	.brp_inc = 1,
263 };
264 
265 /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
266 static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
267 	.name = DRIVER_NAME,
268 	.tseg1_min = 1,
269 	.tseg1_max = 256,
270 	.tseg2_min = 1,
271 	.tseg2_max = 128,
272 	.sjw_max = 128,
273 	.brp_min = 1,
274 	.brp_max = 256,
275 	.brp_inc = 1,
276 };
277 
278 /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
279 static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
280 	.name = DRIVER_NAME,
281 	.tseg1_min = 1,
282 	.tseg1_max = 32,
283 	.tseg2_min = 1,
284 	.tseg2_max = 16,
285 	.sjw_max = 16,
286 	.brp_min = 1,
287 	.brp_max = 256,
288 	.brp_inc = 1,
289 };
290 
291 /* Transmission Delay Compensation constants for CANFD 1.0 */
292 static const struct can_tdc_const xcan_tdc_const_canfd = {
293 	.tdcv_min = 0,
294 	.tdcv_max = 0, /* Manual mode not supported. */
295 	.tdco_min = 0,
296 	.tdco_max = 32,
297 	.tdcf_min = 0, /* Filter window not supported */
298 	.tdcf_max = 0,
299 };
300 
301 /* Transmission Delay Compensation constants for CANFD 2.0 */
302 static const struct can_tdc_const xcan_tdc_const_canfd2 = {
303 	.tdcv_min = 0,
304 	.tdcv_max = 0, /* Manual mode not supported. */
305 	.tdco_min = 0,
306 	.tdco_max = 64,
307 	.tdcf_min = 0, /* Filter window not supported */
308 	.tdcf_max = 0,
309 };
310 
311 /**
312  * xcan_write_reg_le - Write a value to the device register little endian
313  * @priv:	Driver private data structure
314  * @reg:	Register offset
315  * @val:	Value to write at the Register offset
316  *
317  * Write data to the particular CAN register
318  */
319 static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
320 			      u32 val)
321 {
322 	iowrite32(val, priv->reg_base + reg);
323 }
324 
325 /**
326  * xcan_read_reg_le - Read a value from the device register little endian
327  * @priv:	Driver private data structure
328  * @reg:	Register offset
329  *
330  * Read data from the particular CAN register
331  * Return: value read from the CAN register
332  */
333 static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
334 {
335 	return ioread32(priv->reg_base + reg);
336 }
337 
338 /**
339  * xcan_write_reg_be - Write a value to the device register big endian
340  * @priv:	Driver private data structure
341  * @reg:	Register offset
342  * @val:	Value to write at the Register offset
343  *
344  * Write data to the particular CAN register
345  */
346 static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
347 			      u32 val)
348 {
349 	iowrite32be(val, priv->reg_base + reg);
350 }
351 
352 /**
353  * xcan_read_reg_be - Read a value from the device register big endian
354  * @priv:	Driver private data structure
355  * @reg:	Register offset
356  *
357  * Read data from the particular CAN register
358  * Return: value read from the CAN register
359  */
360 static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
361 {
362 	return ioread32be(priv->reg_base + reg);
363 }
364 
365 /**
366  * xcan_rx_int_mask - Get the mask for the receive interrupt
367  * @priv:	Driver private data structure
368  *
369  * Return: The receive interrupt mask used by the driver on this HW
370  */
371 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
372 {
373 	/* RXNEMP is better suited for our use case as it cannot be cleared
374 	 * while the FIFO is non-empty, but CAN FD HW does not have it
375 	 */
376 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
377 		return XCAN_IXR_RXOK_MASK;
378 	else
379 		return XCAN_IXR_RXNEMP_MASK;
380 }
381 
382 /**
383  * set_reset_mode - Resets the CAN device mode
384  * @ndev:	Pointer to net_device structure
385  *
386  * This is the driver reset mode routine. The driver
387  * enters configuration mode.
388  *
389  * Return: 0 on success and failure value on error
390  */
391 static int set_reset_mode(struct net_device *ndev)
392 {
393 	struct xcan_priv *priv = netdev_priv(ndev);
394 	unsigned long timeout;
395 
396 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
397 
398 	timeout = jiffies + XCAN_TIMEOUT;
399 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
400 		if (time_after(jiffies, timeout)) {
401 			netdev_warn(ndev, "timed out for config mode\n");
402 			return -ETIMEDOUT;
403 		}
404 		usleep_range(500, 10000);
405 	}
406 
407 	/* reset clears FIFOs */
408 	priv->tx_head = 0;
409 	priv->tx_tail = 0;
410 
411 	return 0;
412 }
413 
414 /**
415  * xcan_set_bittiming - CAN set bit timing routine
416  * @ndev:	Pointer to net_device structure
417  *
418  * This is the driver set bittiming routine.
419  * Return: 0 on success and failure value on error
420  */
421 static int xcan_set_bittiming(struct net_device *ndev)
422 {
423 	struct xcan_priv *priv = netdev_priv(ndev);
424 	struct can_bittiming *bt = &priv->can.bittiming;
425 	struct can_bittiming *dbt = &priv->can.data_bittiming;
426 	u32 btr0, btr1;
427 	u32 is_config_mode;
428 
429 	/* Check whether Xilinx CAN is in configuration mode.
430 	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
431 	 */
432 	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
433 				XCAN_SR_CONFIG_MASK;
434 	if (!is_config_mode) {
435 		netdev_alert(ndev,
436 			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
437 		return -EPERM;
438 	}
439 
440 	/* Setting Baud Rate prescaler value in BRPR Register */
441 	btr0 = (bt->brp - 1);
442 
443 	/* Setting Time Segment 1 in BTR Register */
444 	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
445 
446 	/* Setting Time Segment 2 in BTR Register */
447 	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
448 
449 	/* Setting Synchronous jump width in BTR Register */
450 	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
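	/* Worked example with hypothetical values on Zynq (ts2_shift = 4,
	 * sjw_shift = 7): prop_seg = 1, phase_seg1 = 3, phase_seg2 = 4,
	 * sjw = 1 yields btr1 = 0x3 | (0x3 << 4) | (0x0 << 7) = 0x33.
	 */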
451 
452 	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
453 	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
454 
455 	if (priv->devtype.cantype == XAXI_CANFD ||
456 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
457 		/* Setting Baud Rate prescaler value in F_BRPR Register */
458 		btr0 = dbt->brp - 1;
459 		if (can_tdc_is_enabled(&priv->can)) {
460 			if (priv->devtype.cantype == XAXI_CANFD)
461 				btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
462 					XCAN_BRPR_TDC_ENABLE;
463 			else
464 				btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
465 					XCAN_BRPR_TDC_ENABLE;
466 		}
467 
468 		/* Setting Time Segment 1 in BTR Register */
469 		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
470 
471 		/* Setting Time Segment 2 in BTR Register */
472 		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
473 
474 		/* Setting Synchronous jump width in BTR Register */
475 		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
476 
477 		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
478 		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
479 	}
480 
481 	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
482 		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
483 		   priv->read_reg(priv, XCAN_BTR_OFFSET));
484 
485 	return 0;
486 }
487 
488 /**
489  * xcan_chip_start - This is the driver's start routine
490  * @ndev:	Pointer to net_device structure
491  *
492  * This is the driver's start routine.
493  * Based on the state of the CAN device it puts
494  * the CAN device into the proper mode.
495  *
496  * Return: 0 on success and failure value on error
497  */
498 static int xcan_chip_start(struct net_device *ndev)
499 {
500 	struct xcan_priv *priv = netdev_priv(ndev);
501 	u32 reg_msr;
502 	int err;
503 	u32 ier;
504 
505 	/* Check if it is in reset mode */
506 	err = set_reset_mode(ndev);
507 	if (err < 0)
508 		return err;
509 
510 	err = xcan_set_bittiming(ndev);
511 	if (err < 0)
512 		return err;
513 
514 	/* Enable interrupts
515 	 *
516 	 * We enable the ERROR interrupt even with
517 	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
518 	 * dedicated interrupt for a state change to
519 	 * ERROR_WARNING/ERROR_PASSIVE.
520 	 */
521 	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
522 		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
523 		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
524 		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
525 
526 	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
527 		ier |= XCAN_IXR_RXMNF_MASK;
528 
529 	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
530 
531 	/* Check whether it is loopback mode or normal mode  */
532 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
533 		reg_msr = XCAN_MSR_LBACK_MASK;
534 	else
535 		reg_msr = 0x0;
536 
537 	/* enable the first extended filter, if any, as cores with extended
538 	 * filtering default to non-receipt if all filters are disabled
539 	 */
540 	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
541 		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
542 
543 	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
544 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
545 
546 	netdev_dbg(ndev, "status: 0x%08x\n",
547 		   priv->read_reg(priv, XCAN_SR_OFFSET));
548 
549 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
550 	return 0;
551 }
552 
553 /**
554  * xcan_do_set_mode - This sets the mode of the driver
555  * @ndev:	Pointer to net_device structure
556  * @mode:	Tells the mode of the driver
557  *
558  * This checks the requested mode and calls the corresponding routine to set it.
559  *
560  * Return: 0 on success and failure value on error
561  */
562 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
563 {
564 	int ret;
565 
566 	switch (mode) {
567 	case CAN_MODE_START:
568 		ret = xcan_chip_start(ndev);
569 		if (ret < 0) {
570 			netdev_err(ndev, "xcan_chip_start failed!\n");
571 			return ret;
572 		}
573 		netif_wake_queue(ndev);
574 		break;
575 	default:
576 		ret = -EOPNOTSUPP;
577 		break;
578 	}
579 
580 	return ret;
581 }
582 
583 /**
584  * xcan_write_frame - Write a frame to HW
585  * @ndev:		Pointer to net_device structure
586  * @skb:		sk_buff pointer that contains data to be Txed
587  * @frame_offset:	Register offset to write the frame to
588  */
589 static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
590 			     int frame_offset)
591 {
592 	u32 id, dlc, data[2] = {0, 0};
593 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
594 	u32 ramoff, dwindex = 0, i;
595 	struct xcan_priv *priv = netdev_priv(ndev);
596 
597 	/* Watch the bit sequence carefully */
598 	if (cf->can_id & CAN_EFF_FLAG) {
599 		/* Extended CAN ID format */
600 		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
601 			XCAN_IDR_ID2_MASK;
602 		id |= (((cf->can_id & CAN_EFF_MASK) >>
603 			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
604 			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
605 
606 		/* The substitute remote TX request bit should be "1"
607 		 * for extended frames as in the Xilinx CAN datasheet
608 		 */
609 		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
610 
611 		if (cf->can_id & CAN_RTR_FLAG)
612 			/* Extended frames remote TX request */
613 			id |= XCAN_IDR_RTR_MASK;
614 	} else {
615 		/* Standard CAN ID format */
616 		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
617 			XCAN_IDR_ID1_MASK;
618 
619 		if (cf->can_id & CAN_RTR_FLAG)
620 			/* Standard frames remote TX request */
621 			id |= XCAN_IDR_SRR_MASK;
622 	}
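	/* "id" now follows the IDR layout defined above: ID[28:18] in bits
	 * 31:21, SRR in bit 20, IDE in bit 19, ID[17:0] in bits 18:1 and
	 * RTR in bit 0 for extended frames; standard frames use only ID1/SRR.
	 */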
623 
624 	dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
625 	if (can_is_canfd_skb(skb)) {
626 		if (cf->flags & CANFD_BRS)
627 			dlc |= XCAN_DLCR_BRS_MASK;
628 		dlc |= XCAN_DLCR_EDL_MASK;
629 	}
630 
631 	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
632 	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
633 		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
634 	else
635 		can_put_echo_skb(skb, ndev, 0, 0);
636 
637 	priv->tx_head++;
638 
639 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
640 	/* If the CAN frame is an RTR frame, this write triggers transmission
641 	 * (not on CAN FD)
642 	 */
643 	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
644 	if (priv->devtype.cantype == XAXI_CANFD ||
645 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
646 		for (i = 0; i < cf->len; i += 4) {
647 			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
648 					(dwindex * XCANFD_DW_BYTES);
649 			priv->write_reg(priv, ramoff,
650 					be32_to_cpup((__be32 *)(cf->data + i)));
651 			dwindex++;
652 		}
653 	} else {
654 		if (cf->len > 0)
655 			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
656 		if (cf->len > 4)
657 			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
658 
659 		if (!(cf->can_id & CAN_RTR_FLAG)) {
660 			priv->write_reg(priv,
661 					XCAN_FRAME_DW1_OFFSET(frame_offset),
662 					data[0]);
663 			/* If the CAN frame is a Standard/Extended frame, this
664 			 * write triggers transmission (not on CAN FD)
665 			 */
666 			priv->write_reg(priv,
667 					XCAN_FRAME_DW2_OFFSET(frame_offset),
668 					data[1]);
669 		}
670 	}
671 }
672 
673 /**
674  * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
675  * @skb:	sk_buff pointer that contains data to be Txed
676  * @ndev:	Pointer to net_device structure
677  *
678  * Return: 0 on success, -ENOSPC if FIFO is full.
679  */
680 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
681 {
682 	struct xcan_priv *priv = netdev_priv(ndev);
683 	unsigned long flags;
684 
685 	/* Check if the TX buffer is full */
686 	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
687 			XCAN_SR_TXFLL_MASK))
688 		return -ENOSPC;
689 
690 	spin_lock_irqsave(&priv->tx_lock, flags);
691 
692 	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);
693 
694 	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
695 	if (priv->tx_max > 1)
696 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
697 
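	/* tx_head is advanced in xcan_write_frame() and tx_tail in
	 * xcan_tx_interrupt(), so head - tail is the number of frames
	 * currently pending in the hardware FIFO.
	 */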
698 	/* Check if the TX buffer is full */
699 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
700 		netif_stop_queue(ndev);
701 
702 	spin_unlock_irqrestore(&priv->tx_lock, flags);
703 
704 	return 0;
705 }
706 
707 /**
708  * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
709  * @skb:	sk_buff pointer that contains data to be Txed
710  * @ndev:	Pointer to net_device structure
711  *
712  * Return: 0 on success, -ENOSPC if there is no space
713  */
714 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
715 {
716 	struct xcan_priv *priv = netdev_priv(ndev);
717 	unsigned long flags;
718 
719 	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
720 		     BIT(XCAN_TX_MAILBOX_IDX)))
721 		return -ENOSPC;
722 
723 	spin_lock_irqsave(&priv->tx_lock, flags);
724 
725 	xcan_write_frame(ndev, skb,
726 			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
727 
728 	/* Mark buffer as ready for transmit */
729 	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
730 
731 	netif_stop_queue(ndev);
732 
733 	spin_unlock_irqrestore(&priv->tx_lock, flags);
734 
735 	return 0;
736 }
737 
738 /**
739  * xcan_start_xmit - Starts the transmission
740  * @skb:	sk_buff pointer that contains data to be Txed
741  * @ndev:	Pointer to net_device structure
742  *
743  * This function is invoked from upper layers to initiate transmission.
744  *
745  * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
746  */
747 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
748 {
749 	struct xcan_priv *priv = netdev_priv(ndev);
750 	int ret;
751 
752 	if (can_dev_dropped_skb(ndev, skb))
753 		return NETDEV_TX_OK;
754 
755 	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
756 		ret = xcan_start_xmit_mailbox(skb, ndev);
757 	else
758 		ret = xcan_start_xmit_fifo(skb, ndev);
759 
760 	if (ret < 0) {
761 		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
762 		netif_stop_queue(ndev);
763 		return NETDEV_TX_BUSY;
764 	}
765 
766 	return NETDEV_TX_OK;
767 }
768 
769 /**
770  * xcan_rx - Is called from CAN ISR to complete the received
771  *		frame processing
772  * @ndev:	Pointer to net_device structure
773  * @frame_base:	Register offset to the frame to be read
774  *
775  * This function is invoked from the CAN isr(poll) to process the Rx frames. It
776  * does minimal processing and invokes "netif_receive_skb" to complete further
777  * processing.
778  * Return: 1 on success and 0 on failure.
779  */
780 static int xcan_rx(struct net_device *ndev, int frame_base)
781 {
782 	struct xcan_priv *priv = netdev_priv(ndev);
783 	struct net_device_stats *stats = &ndev->stats;
784 	struct can_frame *cf;
785 	struct sk_buff *skb;
786 	u32 id_xcan, dlc, data[2] = {0, 0};
787 
788 	skb = alloc_can_skb(ndev, &cf);
789 	if (unlikely(!skb)) {
790 		stats->rx_dropped++;
791 		return 0;
792 	}
793 
794 	/* Read a frame from Xilinx zynq CANPS */
795 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
796 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
797 				   XCAN_DLCR_DLC_SHIFT;
798 
799 	/* Change Xilinx CAN data length format to socketCAN data format */
800 	cf->len = can_cc_dlc2len(dlc);
801 
802 	/* Change Xilinx CAN ID format to socketCAN ID format */
803 	if (id_xcan & XCAN_IDR_IDE_MASK) {
804 		/* The received frame is an Extended format frame */
805 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
806 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
807 				XCAN_IDR_ID2_SHIFT;
808 		cf->can_id |= CAN_EFF_FLAG;
809 		if (id_xcan & XCAN_IDR_RTR_MASK)
810 			cf->can_id |= CAN_RTR_FLAG;
811 	} else {
812 		/* The received frame is a standard format frame */
813 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
814 				XCAN_IDR_ID1_SHIFT;
815 		if (id_xcan & XCAN_IDR_SRR_MASK)
816 			cf->can_id |= CAN_RTR_FLAG;
817 	}
818 
819 	/* DW1/DW2 must always be read to remove message from RXFIFO */
820 	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
821 	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
822 
823 	if (!(cf->can_id & CAN_RTR_FLAG)) {
824 		/* Change Xilinx CAN data format to socketCAN data format */
825 		if (cf->len > 0)
826 			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
827 		if (cf->len > 4)
828 			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
829 
830 		stats->rx_bytes += cf->len;
831 	}
832 	stats->rx_packets++;
833 
834 	netif_receive_skb(skb);
835 
836 	return 1;
837 }
838 
839 /**
840  * xcanfd_rx - Is called from CAN ISR to complete the received
841  *		frame processing
842  * @ndev:	Pointer to net_device structure
843  * @frame_base:	Register offset to the frame to be read
844  *
845  * This function is invoked from the CAN isr(poll) to process the Rx frames. It
846  * does minimal processing and invokes "netif_receive_skb" to complete further
847  * processing.
848  * Return: 1 on success and 0 on failure.
849  */
850 static int xcanfd_rx(struct net_device *ndev, int frame_base)
851 {
852 	struct xcan_priv *priv = netdev_priv(ndev);
853 	struct net_device_stats *stats = &ndev->stats;
854 	struct canfd_frame *cf;
855 	struct sk_buff *skb;
856 	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
857 
858 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
859 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
860 	if (dlc & XCAN_DLCR_EDL_MASK)
861 		skb = alloc_canfd_skb(ndev, &cf);
862 	else
863 		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
864 
865 	if (unlikely(!skb)) {
866 		stats->rx_dropped++;
867 		return 0;
868 	}
869 
870 	/* Change Xilinx CANFD data length format to socketCAN data
871 	 * format
872 	 */
873 	if (dlc & XCAN_DLCR_EDL_MASK)
874 		cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
875 				  XCAN_DLCR_DLC_SHIFT);
876 	else
877 		cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
878 					  XCAN_DLCR_DLC_SHIFT);
879 
880 	/* Change Xilinx CAN ID format to socketCAN ID format */
881 	if (id_xcan & XCAN_IDR_IDE_MASK) {
882 		/* The received frame is an Extended format frame */
883 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
884 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
885 				XCAN_IDR_ID2_SHIFT;
886 		cf->can_id |= CAN_EFF_FLAG;
887 		if (id_xcan & XCAN_IDR_RTR_MASK)
888 			cf->can_id |= CAN_RTR_FLAG;
889 	} else {
890 		/* The received frame is a standard format frame */
891 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
892 				XCAN_IDR_ID1_SHIFT;
893 		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
894 					XCAN_IDR_SRR_MASK))
895 			cf->can_id |= CAN_RTR_FLAG;
896 	}
897 
898 	/* Check if the received frame is FD or not */
899 	if (dlc & XCAN_DLCR_EDL_MASK) {
900 		for (i = 0; i < cf->len; i += 4) {
901 			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
902 					(dwindex * XCANFD_DW_BYTES);
903 			data[0] = priv->read_reg(priv, dw_offset);
904 			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
905 			dwindex++;
906 		}
907 	} else {
908 		for (i = 0; i < cf->len; i += 4) {
909 			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
910 			data[0] = priv->read_reg(priv, dw_offset + i);
911 			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
912 		}
913 	}
914 
915 	if (!(cf->can_id & CAN_RTR_FLAG))
916 		stats->rx_bytes += cf->len;
917 	stats->rx_packets++;
918 
919 	netif_receive_skb(skb);
920 
921 	return 1;
922 }
923 
924 /**
925  * xcan_current_error_state - Get current error state from HW
926  * @ndev:	Pointer to net_device structure
927  *
928  * Checks the current CAN error state from the HW. Note that this
929  * only checks for ERROR_PASSIVE and ERROR_WARNING.
930  *
931  * Return:
932  * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
933  * otherwise.
934  */
935 static enum can_state xcan_current_error_state(struct net_device *ndev)
936 {
937 	struct xcan_priv *priv = netdev_priv(ndev);
938 	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
939 
940 	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
941 		return CAN_STATE_ERROR_PASSIVE;
942 	else if (status & XCAN_SR_ERRWRN_MASK)
943 		return CAN_STATE_ERROR_WARNING;
944 	else
945 		return CAN_STATE_ERROR_ACTIVE;
946 }
947 
948 /**
949  * xcan_set_error_state - Set new CAN error state
950  * @ndev:	Pointer to net_device structure
951  * @new_state:	The new CAN state to be set
952  * @cf:		Error frame to be populated or NULL
953  *
954  * Set new CAN error state for the device, updating statistics and
955  * populating the error frame if given.
956  */
957 static void xcan_set_error_state(struct net_device *ndev,
958 				 enum can_state new_state,
959 				 struct can_frame *cf)
960 {
961 	struct xcan_priv *priv = netdev_priv(ndev);
962 	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
963 	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
964 	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
965 	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
966 	enum can_state rx_state = txerr <= rxerr ? new_state : 0;
967 
968 	/* non-ERROR states are handled elsewhere */
969 	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
970 		return;
971 
972 	can_change_state(ndev, cf, tx_state, rx_state);
973 
974 	if (cf) {
975 		cf->can_id |= CAN_ERR_CNT;
976 		cf->data[6] = txerr;
977 		cf->data[7] = rxerr;
978 	}
979 }
980 
981 /**
982  * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
983  * @ndev:	Pointer to net_device structure
984  *
985  * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
986  * the performed RX/TX has caused it to drop to a lesser state and set
987  * the interface state accordingly.
988  */
989 static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
990 {
991 	struct xcan_priv *priv = netdev_priv(ndev);
992 	enum can_state old_state = priv->can.state;
993 	enum can_state new_state;
994 
995 	/* changing error state due to successful frame RX/TX can only
996 	 * occur from these states
997 	 */
998 	if (old_state != CAN_STATE_ERROR_WARNING &&
999 	    old_state != CAN_STATE_ERROR_PASSIVE)
1000 		return;
1001 
1002 	new_state = xcan_current_error_state(ndev);
1003 
1004 	if (new_state != old_state) {
1005 		struct sk_buff *skb;
1006 		struct can_frame *cf;
1007 
1008 		skb = alloc_can_err_skb(ndev, &cf);
1009 
1010 		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
1011 
1012 		if (skb)
1013 			netif_rx(skb);
1014 	}
1015 }
1016 
1017 /**
1018  * xcan_err_interrupt - error frame Isr
1019  * @ndev:	net_device pointer
1020  * @isr:	interrupt status register value
1021  *
1022  * This is the CAN error interrupt and it will
1023  * check the type of error and forward the error
1024  * frame to upper layers.
1025  */
1026 static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
1027 {
1028 	struct xcan_priv *priv = netdev_priv(ndev);
1029 	struct net_device_stats *stats = &ndev->stats;
1030 	struct can_frame cf = { };
1031 	u32 err_status;
1032 
1033 	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
1034 	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
1035 
1036 	if (isr & XCAN_IXR_BSOFF_MASK) {
1037 		priv->can.state = CAN_STATE_BUS_OFF;
1038 		priv->can.can_stats.bus_off++;
1039 		/* Leave device in Config Mode in bus-off state */
1040 		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1041 		can_bus_off(ndev);
1042 		cf.can_id |= CAN_ERR_BUSOFF;
1043 	} else {
1044 		enum can_state new_state = xcan_current_error_state(ndev);
1045 
1046 		if (new_state != priv->can.state)
1047 			xcan_set_error_state(ndev, new_state, &cf);
1048 	}
1049 
1050 	/* Check for Arbitration lost interrupt */
1051 	if (isr & XCAN_IXR_ARBLST_MASK) {
1052 		priv->can.can_stats.arbitration_lost++;
1053 		cf.can_id |= CAN_ERR_LOSTARB;
1054 		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
1055 	}
1056 
1057 	/* Check for RX FIFO Overflow interrupt */
1058 	if (isr & XCAN_IXR_RXOFLW_MASK) {
1059 		stats->rx_over_errors++;
1060 		stats->rx_errors++;
1061 		cf.can_id |= CAN_ERR_CRTL;
1062 		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
1063 	}
1064 
1065 	/* Check for RX Match Not Finished interrupt */
1066 	if (isr & XCAN_IXR_RXMNF_MASK) {
1067 		stats->rx_dropped++;
1068 		stats->rx_errors++;
1069 		netdev_err(ndev, "RX match not finished, frame discarded\n");
1070 		cf.can_id |= CAN_ERR_CRTL;
1071 		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
1072 	}
1073 
1074 	/* Check for error interrupt */
1075 	if (isr & XCAN_IXR_ERROR_MASK) {
1076 		bool berr_reporting = false;
1077 
1078 		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
1079 			berr_reporting = true;
1080 			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1081 		}
1082 
1083 		/* Check for Ack error interrupt */
1084 		if (err_status & XCAN_ESR_ACKER_MASK) {
1085 			stats->tx_errors++;
1086 			if (berr_reporting) {
1087 				cf.can_id |= CAN_ERR_ACK;
1088 				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
1089 			}
1090 		}
1091 
1092 		/* Check for Bit error interrupt */
1093 		if (err_status & XCAN_ESR_BERR_MASK) {
1094 			stats->tx_errors++;
1095 			if (berr_reporting) {
1096 				cf.can_id |= CAN_ERR_PROT;
1097 				cf.data[2] = CAN_ERR_PROT_BIT;
1098 			}
1099 		}
1100 
1101 		/* Check for Stuff error interrupt */
1102 		if (err_status & XCAN_ESR_STER_MASK) {
1103 			stats->rx_errors++;
1104 			if (berr_reporting) {
1105 				cf.can_id |= CAN_ERR_PROT;
1106 				cf.data[2] = CAN_ERR_PROT_STUFF;
1107 			}
1108 		}
1109 
1110 		/* Check for Form error interrupt */
1111 		if (err_status & XCAN_ESR_FMER_MASK) {
1112 			stats->rx_errors++;
1113 			if (berr_reporting) {
1114 				cf.can_id |= CAN_ERR_PROT;
1115 				cf.data[2] = CAN_ERR_PROT_FORM;
1116 			}
1117 		}
1118 
1119 		/* Check for CRC error interrupt */
1120 		if (err_status & XCAN_ESR_CRCER_MASK) {
1121 			stats->rx_errors++;
1122 			if (berr_reporting) {
1123 				cf.can_id |= CAN_ERR_PROT;
1124 				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
1125 			}
1126 		}
1127 		priv->can.can_stats.bus_error++;
1128 	}
1129 
1130 	if (cf.can_id) {
1131 		struct can_frame *skb_cf;
1132 		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
1133 
1134 		if (skb) {
1135 			skb_cf->can_id |= cf.can_id;
1136 			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
1137 			netif_rx(skb);
1138 		}
1139 	}
1140 
1141 	netdev_dbg(ndev, "%s: error status register:0x%x\n",
1142 		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
1143 }
1144 
1145 /**
1146  * xcan_state_interrupt - It will check the state of the CAN device
1147  * @ndev:	net_device pointer
1148  * @isr:	interrupt status register value
1149  *
1150  * This checks the state of the CAN device
1151  * and puts the device into the appropriate state.
1152  */
1153 static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
1154 {
1155 	struct xcan_priv *priv = netdev_priv(ndev);
1156 
1157 	/* Check for Sleep interrupt; if set, put CAN device in sleep state */
1158 	if (isr & XCAN_IXR_SLP_MASK)
1159 		priv->can.state = CAN_STATE_SLEEPING;
1160 
1161 	/* Check for Wake up interrupt; if set, put CAN device in Active state */
1162 	if (isr & XCAN_IXR_WKUP_MASK)
1163 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
1164 }
1165 
1166 /**
1167  * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
1168  * @priv:	Driver private data structure
1169  *
1170  * Return: Register offset of the next frame in RX FIFO.
1171  */
1172 static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
1173 {
1174 	int offset;
1175 
1176 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
1177 		u32 fsr, mask;
1178 
1179 		/* clear RXOK before the is-empty check so that any newly
1180 		 * received frame will reassert it without a race
1181 		 */
1182 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
1183 
1184 		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
1185 
1186 		/* check if RX FIFO is empty */
1187 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1188 			mask = XCAN_2_FSR_FL_MASK;
1189 		else
1190 			mask = XCAN_FSR_FL_MASK;
1191 
1192 		if (!(fsr & mask))
1193 			return -ENOENT;
1194 
1195 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1196 			offset =
1197 			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
1198 		else
1199 			offset =
1200 			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
1201 
1202 	} else {
1203 		/* check if RX FIFO is empty */
1204 		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
1205 		      XCAN_IXR_RXNEMP_MASK))
1206 			return -ENOENT;
1207 
1208 		/* frames are read from a static offset */
1209 		offset = XCAN_RXFIFO_OFFSET;
1210 	}
1211 
1212 	return offset;
1213 }
1214 
1215 /**
1216  * xcan_rx_poll - Poll routine for rx packets (NAPI)
1217  * @napi:	napi structure pointer
1218  * @quota:	Max number of rx packets to be processed.
1219  *
1220  * This is the poll routine for the rx part.
1221  * It will process at most quota packets.
1222  *
1223  * Return: number of packets received
1224  */
1225 static int xcan_rx_poll(struct napi_struct *napi, int quota)
1226 {
1227 	struct net_device *ndev = napi->dev;
1228 	struct xcan_priv *priv = netdev_priv(ndev);
1229 	u32 ier;
1230 	int work_done = 0;
1231 	int frame_offset;
1232 
1233 	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
1234 	       (work_done < quota)) {
1235 		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
1236 			work_done += xcanfd_rx(ndev, frame_offset);
1237 		else
1238 			work_done += xcan_rx(ndev, frame_offset);
1239 
1240 		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
1241 			/* increment read index */
1242 			priv->write_reg(priv, XCAN_FSR_OFFSET,
1243 					XCAN_FSR_IRI_MASK);
1244 		else
1245 			/* clear rx-not-empty (will actually clear only if
1246 			 * empty)
1247 			 */
1248 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1249 					XCAN_IXR_RXNEMP_MASK);
1250 	}
1251 
1252 	if (work_done)
1253 		xcan_update_error_state_after_rxtx(ndev);
1254 
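	/* Re-enable the RX interrupt only once NAPI polling is really done;
	 * napi_complete_done() returns false if polling must continue.
	 */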
1255 	if (work_done < quota) {
1256 		if (napi_complete_done(napi, work_done)) {
1257 			ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1258 			ier |= xcan_rx_int_mask(priv);
1259 			priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1260 		}
1261 	}
1262 	return work_done;
1263 }
1264 
1265 /**
1266  * xcan_tx_interrupt - Tx Done Isr
1267  * @ndev:	net_device pointer
1268  * @isr:	Interrupt status register value
1269  */
1270 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1271 {
1272 	struct xcan_priv *priv = netdev_priv(ndev);
1273 	struct net_device_stats *stats = &ndev->stats;
1274 	unsigned int frames_in_fifo;
1275 	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1276 	unsigned long flags;
1277 	int retries = 0;
1278 
1279 	/* Synchronize with xmit as we need to know the exact number
1280 	 * of frames in the FIFO to stay in sync due to the TXFEMP
1281 	 * handling.
1282 	 * This also prevents a race between netif_wake_queue() and
1283 	 * netif_stop_queue().
1284 	 */
1285 	spin_lock_irqsave(&priv->tx_lock, flags);
1286 
1287 	frames_in_fifo = priv->tx_head - priv->tx_tail;
1288 
1289 	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1290 		/* clear TXOK anyway to avoid getting back here */
1291 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1292 		spin_unlock_irqrestore(&priv->tx_lock, flags);
1293 		return;
1294 	}
1295 
1296 	/* Check if 2 frames were sent (TXOK only means that at least 1
1297 	 * frame was sent).
1298 	 */
1299 	if (frames_in_fifo > 1) {
1300 		WARN_ON(frames_in_fifo > priv->tx_max);
1301 
1302 		/* Synchronize TXOK and isr so that after the loop:
1303 		 * (1) isr variable is up-to-date at least up to TXOK clear
1304 		 *     time. This avoids us clearing a TXOK of a second frame
1305 		 *     but not noticing that the FIFO is now empty and thus
1306 		 *     marking only a single frame as sent.
1307 		 * (2) No TXOK is left. Having one could mean leaving a
1308 		 *     stray TXOK as we might process the associated frame
1309 		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
1310 		 *     clear to satisfy (1).
1311 		 */
1312 		while ((isr & XCAN_IXR_TXOK_MASK) &&
1313 		       !WARN_ON(++retries == 100)) {
1314 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1315 					XCAN_IXR_TXOK_MASK);
1316 			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1317 		}
1318 
1319 		if (isr & XCAN_IXR_TXFEMP_MASK) {
1320 			/* nothing in FIFO anymore */
1321 			frames_sent = frames_in_fifo;
1322 		}
1323 	} else {
1324 		/* single frame in fifo, just clear TXOK */
1325 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1326 	}
1327 
1328 	while (frames_sent--) {
1329 		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1330 						    priv->tx_max, NULL);
1331 		priv->tx_tail++;
1332 		stats->tx_packets++;
1333 	}
1334 
1335 	netif_wake_queue(ndev);
1336 
1337 	spin_unlock_irqrestore(&priv->tx_lock, flags);
1338 
1339 	xcan_update_error_state_after_rxtx(ndev);
1340 }
1341 
1342 /**
1343  * xcan_interrupt - CAN Isr
1344  * @irq:	irq number
1345  * @dev_id:	device id pointer
1346  *
1347  * This is the Xilinx CAN ISR. It checks for the type of interrupt
1348  * and invokes the corresponding handler.
1349  *
1350  * Return:
1351  * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
1352  */
1353 static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1354 {
1355 	struct net_device *ndev = (struct net_device *)dev_id;
1356 	struct xcan_priv *priv = netdev_priv(ndev);
1357 	u32 isr, ier;
1358 	u32 isr_errors;
1359 	u32 rx_int_mask = xcan_rx_int_mask(priv);
1360 
1361 	/* Get the interrupt status from Xilinx CAN */
1362 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1363 	if (!isr)
1364 		return IRQ_NONE;
1365 
1366 	/* Check for the type of interrupt and process it */
1367 	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
1368 		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
1369 				XCAN_IXR_WKUP_MASK));
1370 		xcan_state_interrupt(ndev, isr);
1371 	}
1372 
1373 	/* Check for Tx interrupt and process it */
1374 	if (isr & XCAN_IXR_TXOK_MASK)
1375 		xcan_tx_interrupt(ndev, isr);
1376 
1377 	/* Check for the type of error interrupt and process it */
1378 	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1379 			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
1380 			    XCAN_IXR_RXMNF_MASK);
1381 	if (isr_errors) {
1382 		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1383 		xcan_err_interrupt(ndev, isr);
1384 	}
1385 
1386 	/* Check for the type of receive interrupt and process it */
1387 	if (isr & rx_int_mask) {
1388 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1389 		ier &= ~rx_int_mask;
1390 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1391 		napi_schedule(&priv->napi);
1392 	}
1393 	return IRQ_HANDLED;
1394 }
1395 
1396 /**
1397  * xcan_chip_stop - Driver stop routine
1398  * @ndev:	Pointer to net_device structure
1399  *
1400  * This is the driver's stop routine. It will disable the
1401  * interrupts and put the device into configuration mode.
1402  */
1403 static void xcan_chip_stop(struct net_device *ndev)
1404 {
1405 	struct xcan_priv *priv = netdev_priv(ndev);
1406 	int ret;
1407 
1408 	/* Disable interrupts and leave the can in configuration mode */
1409 	ret = set_reset_mode(ndev);
1410 	if (ret < 0)
1411 		netdev_dbg(ndev, "set_reset_mode() Failed\n");
1412 
1413 	priv->can.state = CAN_STATE_STOPPED;
1414 }
1415 
1416 /**
1417  * xcan_open - Driver open routine
1418  * @ndev:	Pointer to net_device structure
1419  *
1420  * This is the driver open routine.
1421  * Return: 0 on success and failure value on error
1422  */
1423 static int xcan_open(struct net_device *ndev)
1424 {
1425 	struct xcan_priv *priv = netdev_priv(ndev);
1426 	int ret;
1427 
1428 	ret = phy_power_on(priv->transceiver);
1429 	if (ret)
1430 		return ret;
1431 
1432 	ret = pm_runtime_get_sync(priv->dev);
1433 	if (ret < 0) {
1434 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1435 			   __func__, ret);
1436 		goto err;
1437 	}
1438 
1439 	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1440 			  ndev->name, ndev);
1441 	if (ret < 0) {
1442 		netdev_err(ndev, "irq allocation for CAN failed\n");
1443 		goto err;
1444 	}
1445 
1446 	/* Set chip into reset mode */
1447 	ret = set_reset_mode(ndev);
1448 	if (ret < 0) {
1449 		netdev_err(ndev, "mode resetting failed!\n");
1450 		goto err_irq;
1451 	}
1452 
1453 	/* Common open */
1454 	ret = open_candev(ndev);
1455 	if (ret)
1456 		goto err_irq;
1457 
1458 	ret = xcan_chip_start(ndev);
1459 	if (ret < 0) {
1460 		netdev_err(ndev, "xcan_chip_start failed!\n");
1461 		goto err_candev;
1462 	}
1463 
1464 	napi_enable(&priv->napi);
1465 	netif_start_queue(ndev);
1466 
1467 	return 0;
1468 
1469 err_candev:
1470 	close_candev(ndev);
1471 err_irq:
1472 	free_irq(ndev->irq, ndev);
1473 err:
1474 	pm_runtime_put(priv->dev);
1475 	phy_power_off(priv->transceiver);
1476 
1477 	return ret;
1478 }
1479 
1480 /**
1481  * xcan_close - Driver close routine
1482  * @ndev:	Pointer to net_device structure
1483  *
1484  * Return: 0 always
1485  */
1486 static int xcan_close(struct net_device *ndev)
1487 {
1488 	struct xcan_priv *priv = netdev_priv(ndev);
1489 
1490 	netif_stop_queue(ndev);
1491 	napi_disable(&priv->napi);
1492 	xcan_chip_stop(ndev);
1493 	free_irq(ndev->irq, ndev);
1494 	close_candev(ndev);
1495 
1496 	pm_runtime_put(priv->dev);
1497 	phy_power_off(priv->transceiver);
1498 
1499 	return 0;
1500 }
1501 
1502 /**
1503  * xcan_get_berr_counter - error counter routine
1504  * @ndev:	Pointer to net_device structure
1505  * @bec:	Pointer to can_berr_counter structure
1506  *
1507  * This is the driver error counter routine.
1508  * Return: 0 on success and failure value on error
1509  */
1510 static int xcan_get_berr_counter(const struct net_device *ndev,
1511 				 struct can_berr_counter *bec)
1512 {
1513 	struct xcan_priv *priv = netdev_priv(ndev);
1514 	int ret;
1515 
1516 	ret = pm_runtime_get_sync(priv->dev);
1517 	if (ret < 0) {
1518 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1519 			   __func__, ret);
1520 		pm_runtime_put(priv->dev);
1521 		return ret;
1522 	}
1523 
1524 	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1525 	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1526 			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1527 
1528 	pm_runtime_put(priv->dev);
1529 
1530 	return 0;
1531 }
1532 
1533 /**
1534  * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
1535  * @ndev:	Pointer to net_device structure
1536  * @tdcv:	Pointer to TDCV value
1537  *
1538  * Return: 0 on success
1539  */
1540 static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
1541 {
1542 	struct xcan_priv *priv = netdev_priv(ndev);
1543 
1544 	*tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
1545 
1546 	return 0;
1547 }
1548 
1549 static const struct net_device_ops xcan_netdev_ops = {
1550 	.ndo_open	= xcan_open,
1551 	.ndo_stop	= xcan_close,
1552 	.ndo_start_xmit	= xcan_start_xmit,
1553 	.ndo_change_mtu	= can_change_mtu,
1554 };
1555 
1556 static const struct ethtool_ops xcan_ethtool_ops = {
1557 	.get_ts_info = ethtool_op_get_ts_info,
1558 };
1559 
1560 /**
1561  * xcan_suspend - Suspend method for the driver
1562  * @dev:	Address of the device structure
1563  *
1564  * Put the driver into low power mode.
1565  * Return: 0 on success and failure value on error
1566  */
1567 static int __maybe_unused xcan_suspend(struct device *dev)
1568 {
1569 	struct net_device *ndev = dev_get_drvdata(dev);
1570 
1571 	if (netif_running(ndev)) {
1572 		netif_stop_queue(ndev);
1573 		netif_device_detach(ndev);
1574 		xcan_chip_stop(ndev);
1575 	}
1576 
1577 	return pm_runtime_force_suspend(dev);
1578 }
1579 
1580 /**
1581  * xcan_resume - Resume from suspend
1582  * @dev:	Address of the device structure
1583  *
1584  * Resume operation after suspend.
1585  * Return: 0 on success and failure value on error
1586  */
1587 static int __maybe_unused xcan_resume(struct device *dev)
1588 {
1589 	struct net_device *ndev = dev_get_drvdata(dev);
1590 	int ret;
1591 
1592 	ret = pm_runtime_force_resume(dev);
1593 	if (ret) {
1594 		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1595 		return ret;
1596 	}
1597 
1598 	if (netif_running(ndev)) {
1599 		ret = xcan_chip_start(ndev);
1600 		if (ret) {
1601 			dev_err(dev, "xcan_chip_start failed on resume\n");
1602 			return ret;
1603 		}
1604 
1605 		netif_device_attach(ndev);
1606 		netif_start_queue(ndev);
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 /**
1613  * xcan_runtime_suspend - Runtime suspend method for the driver
1614  * @dev:	Address of the device structure
1615  *
1616  * Put the driver into low power mode.
1617  * Return: 0 always
1618  */
1619 static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1620 {
1621 	struct net_device *ndev = dev_get_drvdata(dev);
1622 	struct xcan_priv *priv = netdev_priv(ndev);
1623 
1624 	clk_disable_unprepare(priv->bus_clk);
1625 	clk_disable_unprepare(priv->can_clk);
1626 
1627 	return 0;
1628 }
1629 
1630 /**
1631  * xcan_runtime_resume - Runtime resume from suspend
1632  * @dev:	Address of the device structure
1633  *
1634  * Resume operation after suspend.
1635  * Return: 0 on success and failure value on error
1636  */
1637 static int __maybe_unused xcan_runtime_resume(struct device *dev)
1638 {
1639 	struct net_device *ndev = dev_get_drvdata(dev);
1640 	struct xcan_priv *priv = netdev_priv(ndev);
1641 	int ret;
1642 
1643 	ret = clk_prepare_enable(priv->bus_clk);
1644 	if (ret) {
1645 		dev_err(dev, "Cannot enable clock.\n");
1646 		return ret;
1647 	}
1648 	ret = clk_prepare_enable(priv->can_clk);
1649 	if (ret) {
1650 		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);

/**
 * xcan_probe - Platform registration call
 * @pdev:	Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xcan_priv *priv;
	struct phy *transceiver;
	const struct of_device_id *of_id;
	const struct xcan_devtype_data *devtype = &xcan_axi_data;
	void __iomem *addr;
	int ret;
	int rx_max, tx_max;
	u32 hw_tx_max = 0, hw_rx_max = 0;
	const char *hw_tx_max_property;

	/* Get the virtual base address for the device */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id && of_id->data)
		devtype = of_id->data;

	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
			     "tx-mailbox-count" : "tx-fifo-depth";

	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
				   &hw_tx_max);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing %s property\n",
			hw_tx_max_property);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				   &hw_rx_max);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
		goto err;
	}

	/* With TX FIFO:
	 *
	 * There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If TXFEMP
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 *
	 * With TX mailboxes:
	 *
	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
	 * we submit frames one at a time.
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2U);
	else
		tx_max = 1;

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;
	priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(priv->rstc)) {
		dev_err(&pdev->dev, "Cannot get CAN reset.\n");
		ret = PTR_ERR(priv->rstc);
		goto err_free;
	}

	ret = reset_control_reset(priv->rstc);
	if (ret)
		goto err_free;

	if (devtype->cantype == XAXI_CANFD) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;
		priv->can.tdc_const = &xcan_tdc_const_canfd;
	}

	if (devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;
		priv->can.tdc_const = &xcan_tdc_const_canfd2;
	}

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
						CAN_CTRLMODE_TDC_AUTO;
		priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
	}

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_reset;

	ndev->irq = ret;

	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;
	ndev->ethtool_ops = &xcan_ethtool_ops;

	/* Get the CAN core clock (can_clk) */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
				    "device clock not found\n");
		goto err_reset;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
				    "bus clock not found\n");
		goto err_reset;
	}

	transceiver = devm_phy_optional_get(&pdev->dev, NULL);
	if (IS_ERR(transceiver)) {
		ret = PTR_ERR(transceiver);
		dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
		goto err_reset;
	}
	priv->transceiver = transceiver;

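	/*
	 * Assume little-endian register access by default; this is
	 * re-checked (and switched to big-endian accessors) once the
	 * core is powered up below.
	 */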
	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_disableclks;
	}

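	/*
	 * After reset the core sits in Configuration mode, so the Status
	 * register is expected to read back as XCAN_SR_CONFIG_MASK. If the
	 * little-endian read does not match, the IP is wired for big-endian
	 * register access, so switch accessors.
	 */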
	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

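	/* Use the RX FIFO depth as the NAPI poll weight */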
	netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register CAN device (err=%d)\n", ret);
		goto err_disableclks;
	}

	of_can_transceiver(ndev);
	pm_runtime_put(&pdev->dev);

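	/*
	 * Bring the additional acceptance filter ID/mask pair on CAN FD 2.0
	 * cores to a known all-zero state.
	 */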
	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
	}

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
	pm_runtime_disable(&pdev->dev);
err_reset:
	reset_control_assert(priv->rstc);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 */
static void xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(priv->rstc);
	free_candev(ndev);
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove_new = xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");