// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
//               Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//

#include <asm/unaligned.h>

#include "mcp251xfd.h"
#include "mcp251xfd-ram.h"

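/* Prepare a SPI write command for the register at @reg in
 * @write_reg_buf. Only the bytes of @val covered by @mask are
 * transferred. Depending on MCP251XFD_QUIRK_CRC_REG either the plain
 * write command, the single byte safe variant or the CRC write
 * command (with a CRC16 appended) is built. Returns the total number
 * of bytes to transfer.
 */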
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
                                union mcp251xfd_write_reg_buf *write_reg_buf,
                                const u16 reg, const u32 mask, const u32 val)
{
        u8 first_byte, last_byte, len;
        u8 *data;
        __le32 val_le32;

        first_byte = mcp251xfd_first_byte_set(mask);
        last_byte = mcp251xfd_last_byte_set(mask);
        len = last_byte - first_byte + 1;

        data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
        val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
        memcpy(data, &val_le32, len);

        if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
                len += sizeof(write_reg_buf->nocrc.cmd);
        } else if (len == 1) {
                u16 crc;

                /* CRC */
                len += sizeof(write_reg_buf->safe.cmd);
                crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
                put_unaligned_be16(crc, (void *)write_reg_buf + len);

                /* Total length */
                len += sizeof(write_reg_buf->safe.crc);
        } else {
                u16 crc;

                mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
                                                     len);
                /* CRC */
                len += sizeof(write_reg_buf->crc.cmd);
                crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
                put_unaligned_be16(crc, (void *)write_reg_buf + len);

                /* Total length */
                len += sizeof(write_reg_buf->crc.crc);
        }

        return len;
}

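/* Set up the TX Event FIFO (TEF) ring: reset head and tail, pre-build
 * the SPI message that enables the TEF IRQs and the transfers that
 * increment the TEF tail pointer (UINC). If TX IRQ coalescing is in
 * use, the last UINC transfer instead uses a buffer that leaves the
 * TEF Not Empty IRQ disabled and enables the TEF Half Full IRQ.
 * *base is advanced past the TEF objects in the controller's RAM.
 */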
static void
mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
{
        struct mcp251xfd_tef_ring *tef_ring;
        struct spi_transfer *xfer;
        u32 val;
        u16 addr;
        u8 len;
        int i;

        /* TEF */
        tef_ring = priv->tef;
        tef_ring->head = 0;
        tef_ring->tail = 0;

        /* TEF- and TX-FIFO have same number of objects */
        *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);

        /* FIFO IRQ enable */
        addr = MCP251XFD_REG_TEFCON;
        val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;

        len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
                                              addr, val, val);
        tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
        tef_ring->irq_enable_xfer.len = len;
        spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
                                        &tef_ring->irq_enable_xfer, 1);

        /* FIFO increment TEF tail pointer */
        addr = MCP251XFD_REG_TEFCON;
        val = MCP251XFD_REG_TEFCON_UINC;
        len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
                                              addr, val, val);

        for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
                xfer = &tef_ring->uinc_xfer[i];
                xfer->tx_buf = &tef_ring->uinc_buf;
                xfer->len = len;
                xfer->cs_change = 1;
                xfer->cs_change_delay.value = 0;
                xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
        }

        /* "cs_change == 1" on the last transfer results in an active
         * chip select after the complete SPI message. This causes the
         * controller to interpret the next register access as
         * data. Set "cs_change" of the last transfer to "0" to
         * properly deactivate the chip select at the end of the
         * message.
         */
        xfer->cs_change = 0;

        if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
                val = MCP251XFD_REG_TEFCON_UINC |
                        MCP251XFD_REG_TEFCON_TEFOVIE |
                        MCP251XFD_REG_TEFCON_TEFHIE;

                len = mcp251xfd_cmd_prepare_write_reg(priv,
                                                      &tef_ring->uinc_irq_disable_buf,
                                                      addr, val, val);
                xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
                xfer->len = len;
        }
}

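/* Prepare the two SPI transfers of a single TX object: the FIFO load
 * of the TX object itself (the actual length is assigned at transmit
 * time) and the shared "request to send" register write prepared in
 * mcp251xfd_ring_init_tx(). Both are combined into one SPI message.
 */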
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
                              const struct mcp251xfd_tx_ring *ring,
                              struct mcp251xfd_tx_obj *tx_obj,
                              const u8 rts_buf_len,
                              const u8 n)
{
        struct spi_transfer *xfer;
        u16 addr;

        /* FIFO load */
        addr = mcp251xfd_get_tx_obj_addr(ring, n);
        if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
                mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
                                                     addr);
        else
                mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
                                              addr);

        xfer = &tx_obj->xfer[0];
        xfer->tx_buf = &tx_obj->buf;
        xfer->len = 0; /* actual len is assigned on the fly */
        xfer->cs_change = 1;
        xfer->cs_change_delay.value = 0;
        xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

        /* FIFO request to send */
        xfer = &tx_obj->xfer[1];
        xfer->tx_buf = &ring->rts_buf;
        xfer->len = rts_buf_len;

        /* SPI message */
        spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
                                        ARRAY_SIZE(tx_obj->xfer));
}

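/* Set up the TX ring: reset head and tail, assign the RAM base
 * address and FIFO number, pre-build the "request to send" register
 * write (TXREQ | UINC) shared by all TX objects and initialize the
 * SPI message of each TX object. *base and *fifo_nr are advanced
 * past this ring.
 */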
static void
mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
        struct mcp251xfd_tx_ring *tx_ring;
        struct mcp251xfd_tx_obj *tx_obj;
        u32 val;
        u16 addr;
        u8 len;
        int i;

        tx_ring = priv->tx;
        tx_ring->head = 0;
        tx_ring->tail = 0;
        tx_ring->base = *base;
        tx_ring->nr = 0;
        tx_ring->fifo_nr = *fifo_nr;

        *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
        *fifo_nr += 1;

        /* FIFO request to send */
        addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
        val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
        len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
                                              addr, val, val);

        mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
                mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
}

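/* Set up all RX rings: reset head and tail, assign RAM base addresses
 * and FIFO numbers, and pre-build the SPI messages that enable the RX
 * FIFO IRQs and that increment the RX tail pointer (UINC). If RX IRQ
 * coalescing is enabled, the last UINC transfer of the 1st RX FIFO
 * uses a separate buffer, see the comment below.
 */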
static void
mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
        struct mcp251xfd_rx_ring *rx_ring;
        struct spi_transfer *xfer;
        u32 val;
        u16 addr;
        u8 len;
        int i, j;

        mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
                rx_ring->head = 0;
                rx_ring->tail = 0;
                rx_ring->base = *base;
                rx_ring->nr = i;
                rx_ring->fifo_nr = *fifo_nr;

                *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
                *fifo_nr += 1;

                /* FIFO IRQ enable */
                addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
                val = MCP251XFD_REG_FIFOCON_RXOVIE |
                        MCP251XFD_REG_FIFOCON_TFNRFNIE;
                len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
                                                      addr, val, val);
                rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
                rx_ring->irq_enable_xfer.len = len;
                spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
                                                &rx_ring->irq_enable_xfer, 1);

                /* FIFO increment RX tail pointer */
                val = MCP251XFD_REG_FIFOCON_UINC;
                len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
                                                      addr, val, val);

                for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
                        xfer = &rx_ring->uinc_xfer[j];
                        xfer->tx_buf = &rx_ring->uinc_buf;
                        xfer->len = len;
                        xfer->cs_change = 1;
                        xfer->cs_change_delay.value = 0;
                        xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
                }

                /* "cs_change == 1" on the last transfer results in an
                 * active chip select after the complete SPI
                 * message. This causes the controller to interpret
                 * the next register access as data. Set "cs_change"
                 * of the last transfer to "0" to properly deactivate
                 * the chip select at the end of the message.
                 */
                xfer->cs_change = 0;

                /* Use 1st RX-FIFO for IRQ coalescing. If enabled
                 * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq
                 * is activated), use the last transfer to disable:
                 *
                 * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
                 *
                 * and enable:
                 *
                 * - TFHRFHIE (Receive FIFO Half Full Interrupt)
                 *   - or -
                 * - TFERFFIE (Receive FIFO Full Interrupt)
                 *
                 * depending on rx_max_coalesce_frames_irq.
                 *
                 * The RXOVIE (Overflow Interrupt) is always enabled.
                 */
                if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
                                         priv->rx_obj_num_coalesce_irq)) {
                        val = MCP251XFD_REG_FIFOCON_UINC |
                                MCP251XFD_REG_FIFOCON_RXOVIE;

                        if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
                                val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
                        else if (priv->rx_obj_num_coalesce_irq)
                                val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;

                        len = mcp251xfd_cmd_prepare_write_reg(priv,
                                                              &rx_ring->uinc_irq_disable_buf,
                                                              addr, val, val);
                        xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
                        xfer->len = len;
                }
        }
}

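/* Lay out the TEF, RX and TX rings in the controller's RAM and
 * pre-build their SPI messages. Returns -ENOMEM if the configured
 * rings need more RAM than available.
 */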
int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
        const struct mcp251xfd_rx_ring *rx_ring;
        u16 base = 0, ram_used;
        u8 fifo_nr = 1;
        int i;

        netdev_reset_queue(priv->ndev);

        mcp251xfd_ring_init_tef(priv, &base);
        mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
        mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);

        /* mcp251xfd_handle_rxif() will iterate over all RX rings.
         * Rings with their corresponding bit set in
         * priv->regs_status.rxif are read out.
         *
         * If the chip is configured for only 1 RX-FIFO, and if there
         * is an RX interrupt pending (RXIF in INT register is set),
         * it must be the 1st RX-FIFO.
         *
         * We mark the RXIF of the 1st FIFO as pending here, so that
         * we can skip the read of the RXIF register in
         * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
         *
         * If we use more than 1 RX-FIFO, this value gets overwritten
         * in mcp251xfd_read_regs_status(), so set it unconditionally
         * here.
         */
        priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);

        if (priv->tx_obj_num_coalesce_irq) {
                netdev_dbg(priv->ndev,
                           "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
                           mcp251xfd_get_tef_obj_addr(0),
                           priv->tx_obj_num_coalesce_irq,
                           sizeof(struct mcp251xfd_hw_tef_obj),
                           priv->tx_obj_num_coalesce_irq *
                           sizeof(struct mcp251xfd_hw_tef_obj));

                netdev_dbg(priv->ndev,
                           " 0x%03x: %2d*%zu bytes = %4zu bytes\n",
                           mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
                           priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
                           sizeof(struct mcp251xfd_hw_tef_obj),
                           (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
                           sizeof(struct mcp251xfd_hw_tef_obj));
        } else {
                netdev_dbg(priv->ndev,
                           "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n",
                           mcp251xfd_get_tef_obj_addr(0),
                           priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
                           priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
        }

        mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
                if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
                        netdev_dbg(priv->ndev,
                                   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
                                   rx_ring->nr, rx_ring->fifo_nr,
                                   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
                                   priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
                                   priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);

                        if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
                                continue;

                        netdev_dbg(priv->ndev,
                                   " 0x%03x: %2u*%u bytes = %4u bytes\n",
                                   mcp251xfd_get_rx_obj_addr(rx_ring,
                                                             priv->rx_obj_num_coalesce_irq),
                                   rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
                                   rx_ring->obj_size,
                                   (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
                                   rx_ring->obj_size);
                } else {
                        netdev_dbg(priv->ndev,
                                   "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
                                   rx_ring->nr, rx_ring->fifo_nr,
                                   mcp251xfd_get_rx_obj_addr(rx_ring, 0),
                                   rx_ring->obj_num, rx_ring->obj_size,
                                   rx_ring->obj_num * rx_ring->obj_size);
                }
        }

        netdev_dbg(priv->ndev,
                   "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
                   priv->tx->fifo_nr,
                   mcp251xfd_get_tx_obj_addr(priv->tx, 0),
                   priv->tx->obj_num, priv->tx->obj_size,
                   priv->tx->obj_num * priv->tx->obj_size);

        netdev_dbg(priv->ndev,
                   "FIFO setup: free: %4d bytes\n",
                   MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));

        ram_used = base - MCP251XFD_RAM_START;
        if (ram_used > MCP251XFD_RAM_SIZE) {
                netdev_err(priv->ndev,
                           "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
                           ram_used, MCP251XFD_RAM_SIZE);
                return -ENOMEM;
        }

        return 0;
}

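/* Free the RX rings allocated by mcp251xfd_ring_alloc(). */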
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
{
        int i;

        for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
                kfree(priv->rx[i]);
                priv->rx[i] = NULL;
        }
}

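/* RX IRQ coalescing timer: once the coalescing period has expired,
 * re-enable the IRQs of the 1st RX FIFO by sending its pre-built
 * irq_enable SPI message, unless the interface is down.
 */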
static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
{
        struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
                                                   rx_irq_timer);
        struct mcp251xfd_rx_ring *ring = priv->rx[0];

        if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
                return HRTIMER_NORESTART;

        spi_async(priv->spi, &ring->irq_enable_msg);

        return HRTIMER_NORESTART;
}

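/* TX IRQ coalescing timer: re-enable the TEF IRQs by sending the
 * pre-built irq_enable SPI message, unless the interface is down.
 */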
static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
{
        struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
                                                   tx_irq_timer);
        struct mcp251xfd_tef_ring *ring = priv->tef;

        if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
                return HRTIMER_NORESTART;

        spi_async(priv->spi, &ring->irq_enable_msg);

        return HRTIMER_NORESTART;
}

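/* Constraints of the controller's RAM, used by the can-ram helpers
 * (see mcp251xfd-ram.h) to split the RAM between RX objects and TX
 * (plus TEF) objects, for both CAN-2.0 and CAN-FD frame sizes.
 */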
const struct can_ram_config mcp251xfd_ram_config = {
        .rx = {
                .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
                .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
                .min = MCP251XFD_RX_OBJ_NUM_MIN,
                .max = MCP251XFD_RX_OBJ_NUM_MAX,
                .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
                .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
                .fifo_num = MCP251XFD_FIFO_RX_NUM,
                .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
                .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
        },
        .tx = {
                .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
                        sizeof(struct mcp251xfd_hw_tx_obj_can),
                .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
                        sizeof(struct mcp251xfd_hw_tx_obj_canfd),
                .min = MCP251XFD_TX_OBJ_NUM_MIN,
                .max = MCP251XFD_TX_OBJ_NUM_MAX,
                .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
                .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
                .fifo_num = MCP251XFD_FIFO_TX_NUM,
                .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
                .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
        },
        .size = MCP251XFD_RAM_SIZE,
        .fifo_depth = MCP251XFD_FIFO_DEPTH,
};

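/* Allocate the RX rings and set the RX/TX object sizes for the
 * current mode (CAN-2.0 or CAN-FD). When switching between the two
 * modes, the default RAM layout for the new mode is applied first.
 * The RX and TX IRQ coalescing hrtimers are (re-)initialized as well.
 */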
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
        const bool fd_mode = mcp251xfd_is_fd_mode(priv);
        struct mcp251xfd_tx_ring *tx_ring = priv->tx;
        struct mcp251xfd_rx_ring *rx_ring;
        u8 tx_obj_size, rx_obj_size;
        u8 rem, i;

        /* switching from CAN-2.0 to CAN-FD mode or vice versa */
        if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
                struct can_ram_layout layout;

                can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
                priv->rx_obj_num = layout.default_rx;
                tx_ring->obj_num = layout.default_tx;
        }

        if (fd_mode) {
                tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
                rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
                set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
        } else {
                tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
                rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
                clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
        }

        tx_ring->obj_size = tx_obj_size;

        rem = priv->rx_obj_num;
        for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
                u8 rx_obj_num;

                if (i == 0 && priv->rx_obj_num_coalesce_irq)
                        rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
                                           MCP251XFD_FIFO_DEPTH);
                else
                        rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
                                           MCP251XFD_FIFO_DEPTH);
                rem -= rx_obj_num;

                rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
                                  GFP_KERNEL);
                if (!rx_ring) {
                        mcp251xfd_ring_free(priv);
                        return -ENOMEM;
                }

                rx_ring->obj_num = rx_obj_num;
                rx_ring->obj_size = rx_obj_size;
                priv->rx[i] = rx_ring;
        }
        priv->rx_ring_num = i;

        hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;

        hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;

        return 0;
}