// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
// Dong Aisheng <b29396@freescale.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/

/* Bosch M_CAN user manual can be obtained from:
 * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
 * mcan_users_manual_v302.pdf
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>

#include "m_can.h"

/* registers definition */
enum m_can_reg {
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_DBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_NBTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
/* TDCR Register only available for version >=3.1.x */
	M_CAN_TDCR	= 0x48,
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};

/* napi related */
#define M_CAN_NAPI_WEIGHT	64

/* message ram configuration data length */
#define MRAM_CFG_LEN	8

/* Core Release Register (CREL) */
#define CREL_REL_SHIFT		28
#define CREL_REL_MASK		(0xF << CREL_REL_SHIFT)
#define CREL_STEP_SHIFT		24
#define CREL_STEP_MASK		(0xF << CREL_STEP_SHIFT)
#define CREL_SUBSTEP_SHIFT	20
#define CREL_SUBSTEP_MASK	(0xF << CREL_SUBSTEP_SHIFT)

/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC		BIT(23)
#define DBTP_DBRP_SHIFT		16
#define DBTP_DBRP_MASK		(0x1f << DBTP_DBRP_SHIFT)
#define DBTP_DTSEG1_SHIFT	8
#define DBTP_DTSEG1_MASK	(0x1f << DBTP_DTSEG1_SHIFT)
#define DBTP_DTSEG2_SHIFT	4
#define DBTP_DTSEG2_MASK	(0xf << DBTP_DTSEG2_SHIFT)
#define DBTP_DSJW_SHIFT		0
#define DBTP_DSJW_MASK		(0xf << DBTP_DSJW_SHIFT)

/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_SHIFT		8
#define TDCR_TDCO_MASK		(0x7F << TDCR_TDCO_SHIFT)
#define TDCR_TDCF_SHIFT		0
#define TDCR_TDCF_MASK		(0x7F << TDCR_TDCF_SHIFT)

/* Test Register (TEST) */
#define TEST_LBCK		BIT(4)

/* CC Control Register(CCCR) */
#define CCCR_CMR_MASK		0x3
#define CCCR_CMR_SHIFT		10
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		0x3
#define CCCR_CME_SHIFT		8
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
#define CCCR_TXP		BIT(14)
#define CCCR_TEST		BIT(7)
#define CCCR_MON		BIT(5)
#define CCCR_CSR		BIT(4)
#define CCCR_CSA		BIT(3)
#define CCCR_ASM		BIT(2)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
#define CCCR_CANFD		0x10
/* for version >=3.1.x */
#define CCCR_EFBI		BIT(13)
#define CCCR_PXHD		BIT(12)
#define CCCR_BRSE		BIT(9)
#define CCCR_FDOE		BIT(8)
/* only for version >=3.2.x */
#define CCCR_NISO		BIT(15)

/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_SHIFT		25
#define NBTP_NSJW_MASK		(0x7f << NBTP_NSJW_SHIFT)
#define NBTP_NBRP_SHIFT		16
#define NBTP_NBRP_MASK		(0x1ff << NBTP_NBRP_SHIFT)
#define NBTP_NTSEG1_SHIFT	8
#define NBTP_NTSEG1_MASK	(0xff << NBTP_NTSEG1_SHIFT)
#define NBTP_NTSEG2_SHIFT	0
#define NBTP_NTSEG2_MASK	(0x7f << NBTP_NTSEG2_SHIFT)

/* Error Counter Register(ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_SHIFT		8
#define ECR_REC_MASK		(0x7f << ECR_REC_SHIFT)
#define ECR_TEC_SHIFT		0
#define ECR_TEC_MASK		0xff

/* Protocol Status Register(PSR) */
#define PSR_BO		BIT(7)
#define PSR_EW		BIT(6)
#define PSR_EP		BIT(5)
#define PSR_LEC_MASK	0x7

/* Interrupt Register(IR) */
#define IR_ALL_INT	0xffffffff

/* Renamed bits for versions > 3.1.x */
#define IR_ARA		BIT(29)
#define IR_PED		BIT(28)
#define IR_PEA		BIT(27)

/* Bits for version 3.0.x */
#define IR_STE		BIT(31)
#define IR_FOE		BIT(30)
#define IR_ACKE		BIT(29)
#define IR_BE		BIT(28)
#define IR_CRCE		BIT(27)
#define IR_WDI		BIT(26)
#define IR_BO		BIT(25)
#define IR_EW		BIT(24)
#define IR_EP		BIT(23)
#define IR_ELO		BIT(22)
#define IR_BEU		BIT(21)
#define IR_BEC		BIT(20)
#define IR_DRX		BIT(19)
#define IR_TOO		BIT(18)
#define IR_MRAF		BIT(17)
#define IR_TSW		BIT(16)
#define IR_TEFL		BIT(15)
#define IR_TEFF		BIT(14)
#define IR_TEFW		BIT(13)
#define IR_TEFN		BIT(12)
#define IR_TFE		BIT(11)
#define IR_TCF		BIT(10)
#define IR_TC		BIT(9)
#define IR_HPM		BIT(8)
#define IR_RF1L		BIT(7)
#define IR_RF1F		BIT(6)
#define IR_RF1W		BIT(5)
#define IR_RF1N		BIT(4)
#define IR_RF0L		BIT(3)
#define IR_RF0F		BIT(2)
#define IR_RF0W		BIT(1)
#define IR_RF0N		BIT(0)
#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X	(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X	(IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			 IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_30X	(IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X	(IR_PED | IR_PEA)
#define IR_ERR_BUS_31X	(IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			 IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_31X	(IR_ERR_STATE | IR_ERR_BUS_31X)

/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0	0x0
#define ILS_ALL_INT1	0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT1	BIT(1)
#define ILE_EINT0	BIT(0)

/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_SHIFT	24
#define RXFC_FWM_MASK	(0x7f << RXFC_FWM_SHIFT)
#define RXFC_FS_SHIFT	16
#define RXFC_FS_MASK	(0x7f << RXFC_FS_SHIFT)

/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL	BIT(25)
#define RXFS_FF		BIT(24)
#define RXFS_FPI_SHIFT	16
#define RXFS_FPI_MASK	0x3f0000
#define RXFS_FGI_SHIFT	8
#define RXFS_FGI_MASK	0x3f00
#define RXFS_FFL_MASK	0x7f

/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define M_CAN_RXESC_8BYTES	0x0
#define M_CAN_RXESC_64BYTES	0x777

/* Tx Buffer Configuration(TXBC) */
#define TXBC_NDTB_SHIFT		16
#define TXBC_NDTB_MASK		(0x3f << TXBC_NDTB_SHIFT)
#define TXBC_TFQS_SHIFT		24
#define TXBC_TFQS_MASK		(0x3f << TXBC_TFQS_SHIFT)

/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF		BIT(21)
#define TXFQS_TFQPI_SHIFT	16
#define TXFQS_TFQPI_MASK	(0x1f << TXFQS_TFQPI_SHIFT)
#define TXFQS_TFGI_SHIFT	8
#define TXFQS_TFGI_MASK		(0x1f << TXFQS_TFGI_SHIFT)
#define TXFQS_TFFL_SHIFT	0
#define TXFQS_TFFL_MASK		(0x3f << TXFQS_TFFL_SHIFT)

/* Tx Buffer Element Size Configuration(TXESC) */
#define TXESC_TBDS_8BYTES	0x0
#define TXESC_TBDS_64BYTES	0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_SHIFT		16
#define TXEFC_EFS_MASK		(0x3f << TXEFC_EFS_SHIFT)

/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL		BIT(25)
#define TXEFS_EFF		BIT(24)
#define TXEFS_EFGI_SHIFT	8
#define TXEFS_EFGI_MASK		(0x1f << TXEFS_EFGI_SHIFT)
#define TXEFS_EFFL_SHIFT	0
#define TXEFS_EFFL_MASK		(0x3f << TXEFS_EFFL_SHIFT)

/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_SHIFT	0
#define TXEFA_EFAI_MASK		(0x1f << TXEFA_EFAI_SHIFT)

/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	72
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72

/* Message RAM Elements */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))

/* Rx Buffer Element */
/* R0 */
#define RX_BUF_ESI	BIT(31)
#define RX_BUF_XTD	BIT(30)
#define RX_BUF_RTR	BIT(29)
/* R1 */
#define RX_BUF_ANMF	BIT(31)
#define RX_BUF_FDF	BIT(21)
#define RX_BUF_BRS	BIT(20)

/* Tx Buffer Element */
/* T0 */
#define TX_BUF_ESI	BIT(31)
#define TX_BUF_XTD	BIT(30)
#define TX_BUF_RTR	BIT(29)
/* T1 */
#define TX_BUF_EFC	BIT(23)
#define TX_BUF_FDF	BIT(21)
#define TX_BUF_BRS	BIT(20)
#define TX_BUF_MM_SHIFT	24
#define TX_BUF_MM_MASK	(0xff << TX_BUF_MM_SHIFT)

/* Tx event FIFO Element */
/* E1 */
#define TX_EVENT_MM_SHIFT	TX_BUF_MM_SHIFT
#define TX_EVENT_MM_MASK	(0xff << TX_EVENT_MM_SHIFT)

static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
	return cdev->ops->read_reg(cdev, reg);
}

static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
			       u32 val)
{
	cdev->ops->write_reg(cdev, reg, val);
}

static u32 m_can_fifo_read(struct m_can_classdev *cdev,
			   u32 fgi, unsigned int offset)
{
	u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
		offset;

	return cdev->ops->read_fifo(cdev, addr_offset);
}

static void m_can_fifo_write(struct m_can_classdev *cdev,
			     u32 fpi, unsigned int offset, u32 val)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
		offset;

	cdev->ops->write_fifo(cdev, addr_offset, val);
}

static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev,
					   u32 fpi, u32 val)
{
	cdev->ops->write_fifo(cdev, fpi, val);
}

static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
		offset;

	return cdev->ops->read_fifo(cdev, addr_offset);
}

static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
	return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
}

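/* Toggle CCCR.INIT/CCE to enter or leave configuration mode.  CCCR.CCE is
 * only writable while CCCR.INIT is set, and the core needs a moment to
 * acknowledge the change, so the loop below polls for roughly 10 us.
 */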
void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
	u32 cccr = m_can_read(cdev, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	/* Clear the Clock stop request if it was set */
	if (cccr & CCCR_CSR)
		cccr &= ~CCCR_CSR;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(cdev->net, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}

static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}

static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
	m_can_write(cdev, M_CAN_ILE, 0x0);
}

static void m_can_clean(struct net_device *net)
{
	struct m_can_classdev *cdev = netdev_priv(net);

	if (cdev->tx_skb) {
		int putidx = 0;

		net->stats.tx_errors++;
		if (cdev->version > 30)
			putidx = ((m_can_read(cdev, M_CAN_TXFQS) &
				   TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT);

		can_free_echo_skb(cdev->net, putidx);
		cdev->tx_skb = NULL;
	}
}

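/* An Rx FIFO element starts with two header words: R0 carries the ID and
 * the ESI/XTD/RTR flags, R1 carries the DLC (bits 19:16) and the FDF/BRS
 * flags, followed by the data words.  The get index (FGI) taken from
 * RXF0S selects which element is read.
 */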
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id, fgi, dlc;
	int i;

	/* calculate the fifo get index for where to read data */
	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
	dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC);
	if (dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	if (dlc & RX_BUF_FDF)
		cf->len = can_dlc2len((dlc >> 16) & 0x0F);
	else
		cf->len = get_can_dlc((dlc >> 16) & 0x0F);

	id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID);
	if (id & RX_BUF_XTD)
		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (id >> 18) & CAN_SFF_MASK;

	if (id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		for (i = 0; i < cf->len; i += 4)
			*(u32 *)(cf->data + i) =
				m_can_fifo_read(cdev, fgi,
						M_CAN_FIFO_DATA(i / 4));
	}

	/* acknowledge rx fifo 0 */
	m_can_write(cdev, M_CAN_RXF0A, fgi);

	stats->rx_packets++;
	stats->rx_bytes += cf->len;

	netif_receive_skb(skb);
}

static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;

	rxfs = m_can_read(cdev, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
		m_can_read_fifo(dev, rxfs);

		quota--;
		pkts++;
		rxfs = m_can_read(cdev, M_CAN_RXF0S);
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);

	return 1;
}

540static int m_can_handle_lec_err(struct net_device *dev,
541 enum m_can_lec_type lec_type)
542{
441ac340 543 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
544 struct net_device_stats *stats = &dev->stats;
545 struct can_frame *cf;
546 struct sk_buff *skb;
547
441ac340 548 cdev->can.can_stats.bus_error++;
e0d1f481
DA
549 stats->rx_errors++;
550
551 /* propagate the error condition to the CAN stack */
552 skb = alloc_can_err_skb(dev, &cf);
553 if (unlikely(!skb))
554 return 0;
555
556 /* check for 'last error code' which tells us the
557 * type of the last error to occur on the CAN bus
558 */
559 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
e0d1f481
DA
560
561 switch (lec_type) {
562 case LEC_STUFF_ERROR:
563 netdev_dbg(dev, "stuff error\n");
564 cf->data[2] |= CAN_ERR_PROT_STUFF;
565 break;
566 case LEC_FORM_ERROR:
567 netdev_dbg(dev, "form error\n");
568 cf->data[2] |= CAN_ERR_PROT_FORM;
569 break;
570 case LEC_ACK_ERROR:
571 netdev_dbg(dev, "ack error\n");
ffd461f8 572 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
e0d1f481
DA
573 break;
574 case LEC_BIT1_ERROR:
575 netdev_dbg(dev, "bit1 error\n");
576 cf->data[2] |= CAN_ERR_PROT_BIT1;
577 break;
578 case LEC_BIT0_ERROR:
579 netdev_dbg(dev, "bit0 error\n");
580 cf->data[2] |= CAN_ERR_PROT_BIT0;
581 break;
582 case LEC_CRC_ERROR:
583 netdev_dbg(dev, "CRC error\n");
ffd461f8 584 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
e0d1f481
DA
585 break;
586 default:
587 break;
588 }
589
590 stats->rx_packets++;
591 stats->rx_bytes += cf->can_dlc;
592 netif_receive_skb(skb);
593
594 return 1;
595}
596
f6a99649
DA
597static int __m_can_get_berr_counter(const struct net_device *dev,
598 struct can_berr_counter *bec)
599{
441ac340 600 struct m_can_classdev *cdev = netdev_priv(dev);
f6a99649
DA
601 unsigned int ecr;
602
441ac340 603 ecr = m_can_read(cdev, M_CAN_ECR);
f6a99649 604 bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
5e1bd15a 605 bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;
f6a99649
DA
606
607 return 0;
608}
609
441ac340 610static int m_can_clk_start(struct m_can_classdev *cdev)
e0d1f481 611{
e0d1f481
DA
612 int err;
613
441ac340 614 if (cdev->pm_clock_support == 0)
f524f829
DM
615 return 0;
616
441ac340 617 err = pm_runtime_get_sync(cdev->dev);
1675bee3 618 if (err < 0) {
441ac340 619 pm_runtime_put_noidle(cdev->dev);
1675bee3
FA
620 return err;
621 }
e0d1f481 622
1675bee3 623 return 0;
ef7b8aa8 624}
e0d1f481 625
441ac340 626static void m_can_clk_stop(struct m_can_classdev *cdev)
ef7b8aa8 627{
441ac340
DM
628 if (cdev->pm_clock_support)
629 pm_runtime_put_sync(cdev->dev);
ef7b8aa8
QS
630}
631
632static int m_can_get_berr_counter(const struct net_device *dev,
633 struct can_berr_counter *bec)
634{
441ac340 635 struct m_can_classdev *cdev = netdev_priv(dev);
ef7b8aa8
QS
636 int err;
637
441ac340 638 err = m_can_clk_start(cdev);
ef7b8aa8
QS
639 if (err)
640 return err;
641
642 __m_can_get_berr_counter(dev, bec);
643
441ac340 644 m_can_clk_stop(cdev);
e0d1f481
DA
645
646 return 0;
647}
648
649static int m_can_handle_state_change(struct net_device *dev,
650 enum can_state new_state)
651{
441ac340 652 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
653 struct net_device_stats *stats = &dev->stats;
654 struct can_frame *cf;
655 struct sk_buff *skb;
656 struct can_berr_counter bec;
657 unsigned int ecr;
658
659 switch (new_state) {
76be8332 660 case CAN_STATE_ERROR_WARNING:
e0d1f481 661 /* error warning state */
441ac340
DM
662 cdev->can.can_stats.error_warning++;
663 cdev->can.state = CAN_STATE_ERROR_WARNING;
e0d1f481
DA
664 break;
665 case CAN_STATE_ERROR_PASSIVE:
666 /* error passive state */
441ac340
DM
667 cdev->can.can_stats.error_passive++;
668 cdev->can.state = CAN_STATE_ERROR_PASSIVE;
e0d1f481
DA
669 break;
670 case CAN_STATE_BUS_OFF:
671 /* bus-off state */
441ac340
DM
672 cdev->can.state = CAN_STATE_BUS_OFF;
673 m_can_disable_all_interrupts(cdev);
674 cdev->can.can_stats.bus_off++;
e0d1f481
DA
675 can_bus_off(dev);
676 break;
677 default:
678 break;
679 }
680
681 /* propagate the error condition to the CAN stack */
682 skb = alloc_can_err_skb(dev, &cf);
683 if (unlikely(!skb))
684 return 0;
685
f6a99649 686 __m_can_get_berr_counter(dev, &bec);
e0d1f481
DA
687
688 switch (new_state) {
76be8332 689 case CAN_STATE_ERROR_WARNING:
e0d1f481
DA
690 /* error warning state */
691 cf->can_id |= CAN_ERR_CRTL;
692 cf->data[1] = (bec.txerr > bec.rxerr) ?
693 CAN_ERR_CRTL_TX_WARNING :
694 CAN_ERR_CRTL_RX_WARNING;
695 cf->data[6] = bec.txerr;
696 cf->data[7] = bec.rxerr;
697 break;
698 case CAN_STATE_ERROR_PASSIVE:
699 /* error passive state */
700 cf->can_id |= CAN_ERR_CRTL;
441ac340 701 ecr = m_can_read(cdev, M_CAN_ECR);
e0d1f481
DA
702 if (ecr & ECR_RP)
703 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
704 if (bec.txerr > 127)
705 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
706 cf->data[6] = bec.txerr;
707 cf->data[7] = bec.rxerr;
708 break;
709 case CAN_STATE_BUS_OFF:
710 /* bus-off state */
711 cf->can_id |= CAN_ERR_BUSOFF;
712 break;
713 default:
714 break;
715 }
716
717 stats->rx_packets++;
718 stats->rx_bytes += cf->can_dlc;
719 netif_receive_skb(skb);
720
721 return 1;
722}
723
724static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
725{
441ac340 726 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
727 int work_done = 0;
728
441ac340 729 if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
e0d1f481
DA
730 netdev_dbg(dev, "entered error warning state\n");
731 work_done += m_can_handle_state_change(dev,
732 CAN_STATE_ERROR_WARNING);
733 }
734
441ac340 735 if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
a93f5cae 736 netdev_dbg(dev, "entered error passive state\n");
e0d1f481
DA
737 work_done += m_can_handle_state_change(dev,
738 CAN_STATE_ERROR_PASSIVE);
739 }
740
441ac340 741 if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
a93f5cae 742 netdev_dbg(dev, "entered error bus off state\n");
e0d1f481
DA
743 work_done += m_can_handle_state_change(dev,
744 CAN_STATE_BUS_OFF);
745 }
746
747 return work_done;
748}
749
750static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
751{
752 if (irqstatus & IR_WDI)
753 netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
a93f5cae 754 if (irqstatus & IR_ELO)
e0d1f481
DA
755 netdev_err(dev, "Error Logging Overflow\n");
756 if (irqstatus & IR_BEU)
757 netdev_err(dev, "Bit Error Uncorrected\n");
758 if (irqstatus & IR_BEC)
759 netdev_err(dev, "Bit Error Corrected\n");
760 if (irqstatus & IR_TOO)
761 netdev_err(dev, "Timeout reached\n");
762 if (irqstatus & IR_MRAF)
763 netdev_err(dev, "Message RAM access failure occurred\n");
764}
765
766static inline bool is_lec_err(u32 psr)
767{
768 psr &= LEC_UNUSED;
769
770 return psr && (psr != LEC_UNUSED);
771}
772
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    is_lec_err(psr))
		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);

	/* other unprocessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}
792
f524f829 793static int m_can_rx_handler(struct net_device *dev, int quota)
e0d1f481 794{
441ac340 795 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
796 int work_done = 0;
797 u32 irqstatus, psr;
798
441ac340 799 irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
e0d1f481
DA
800 if (!irqstatus)
801 goto end;
802
	/* Errata workaround for issue "Needless activation of MRAF irq"
	 * During frame reception while the MCAN is in Error Passive state
	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
	 * it may happen that MCAN_IR.MRAF is set although there was no
	 * Message RAM access failure.
	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is
	 * generated.
	 * The Message RAM Access Failure interrupt routine needs to check
	 * whether MCAN_ECR.RP = '1' and MCAN_ECR.REC = 127.
	 * In this case, reset MCAN_IR.MRAF. No further action is required.
	 */
441ac340
DM
813 if (cdev->version <= 31 && irqstatus & IR_MRAF &&
814 m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
3e82f2f3
EH
815 struct can_berr_counter bec;
816
817 __m_can_get_berr_counter(dev, &bec);
818 if (bec.rxerr == 127) {
441ac340 819 m_can_write(cdev, M_CAN_IR, IR_MRAF);
3e82f2f3
EH
820 irqstatus &= ~IR_MRAF;
821 }
822 }
823
441ac340
DM
824 psr = m_can_read(cdev, M_CAN_PSR);
825
e0d1f481
DA
826 if (irqstatus & IR_ERR_STATE)
827 work_done += m_can_handle_state_errors(dev, psr);
828
5e1bd15a 829 if (irqstatus & IR_ERR_BUS_30X)
e0d1f481
DA
830 work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
831
832 if (irqstatus & IR_RF0N)
833 work_done += m_can_do_rx_poll(dev, (quota - work_done));
f524f829
DM
834end:
835 return work_done;
836}
e0d1f481 837
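/* For peripheral devices (e.g. SPI-attached cores) the RX path runs
 * directly from the threaded interrupt handler instead of NAPI.
 */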
838static int m_can_rx_peripheral(struct net_device *dev)
839{
441ac340 840 struct m_can_classdev *cdev = netdev_priv(dev);
f524f829
DM
841
842 m_can_rx_handler(dev, 1);
843
441ac340 844 m_can_enable_all_interrupts(cdev);
f524f829
DM
845
846 return 0;
847}
848
849static int m_can_poll(struct napi_struct *napi, int quota)
850{
851 struct net_device *dev = napi->dev;
441ac340 852 struct m_can_classdev *cdev = netdev_priv(dev);
f524f829
DM
853 int work_done;
854
855 work_done = m_can_rx_handler(dev, quota);
e0d1f481 856 if (work_done < quota) {
6ad20165 857 napi_complete_done(napi, work_done);
441ac340 858 m_can_enable_all_interrupts(cdev);
e0d1f481
DA
859 }
860
e0d1f481
DA
861 return work_done;
862}
863
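/* The message marker stored in each Tx event FIFO element is the put
 * index used when the frame was queued, so it maps directly to the
 * echo skb that is completed below.
 */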
864static void m_can_echo_tx_event(struct net_device *dev)
865{
866 u32 txe_count = 0;
867 u32 m_can_txefs;
868 u32 fgi = 0;
869 int i = 0;
870 unsigned int msg_mark;
871
441ac340 872 struct m_can_classdev *cdev = netdev_priv(dev);
10c1c397
MH
873 struct net_device_stats *stats = &dev->stats;
874
875 /* read tx event fifo status */
441ac340 876 m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
10c1c397
MH
877
878 /* Get Tx Event fifo element count */
879 txe_count = (m_can_txefs & TXEFS_EFFL_MASK)
880 >> TXEFS_EFFL_SHIFT;
881
882 /* Get and process all sent elements */
883 for (i = 0; i < txe_count; i++) {
884 /* retrieve get index */
441ac340 885 fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
10c1c397
MH
886 >> TXEFS_EFGI_SHIFT;
887
888 /* get message marker */
441ac340 889 msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) &
10c1c397
MH
890 TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
891
892 /* ack txe element */
441ac340 893 m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
10c1c397
MH
894 (fgi << TXEFA_EFAI_SHIFT)));
895
896 /* update stats */
897 stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
898 stats->tx_packets++;
899 }
900}
901
e0d1f481
DA
902static irqreturn_t m_can_isr(int irq, void *dev_id)
903{
904 struct net_device *dev = (struct net_device *)dev_id;
441ac340 905 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
906 struct net_device_stats *stats = &dev->stats;
907 u32 ir;
908
ce169c39
JN
909 if (pm_runtime_suspended(cdev->dev))
910 return IRQ_NONE;
441ac340 911 ir = m_can_read(cdev, M_CAN_IR);
e0d1f481
DA
912 if (!ir)
913 return IRQ_NONE;
914
915 /* ACK all irqs */
916 if (ir & IR_ALL_INT)
441ac340 917 m_can_write(cdev, M_CAN_IR, ir);
e0d1f481 918
441ac340
DM
919 if (cdev->ops->clear_interrupts)
920 cdev->ops->clear_interrupts(cdev);
f524f829 921
e0d1f481
DA
922 /* schedule NAPI in case of
923 * - rx IRQ
924 * - state change IRQ
925 * - bus error IRQ and bus error reporting
926 */
5e1bd15a 927 if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
441ac340
DM
928 cdev->irqstatus = ir;
929 m_can_disable_all_interrupts(cdev);
930 if (!cdev->is_peripheral)
931 napi_schedule(&cdev->napi);
f524f829
DM
932 else
933 m_can_rx_peripheral(dev);
e0d1f481
DA
934 }
935
441ac340 936 if (cdev->version == 30) {
10c1c397
MH
937 if (ir & IR_TC) {
938 /* Transmission Complete Interrupt*/
939 stats->tx_bytes += can_get_echo_skb(dev, 0);
940 stats->tx_packets++;
941 can_led_event(dev, CAN_LED_EVENT_TX);
942 netif_wake_queue(dev);
943 }
944 } else {
945 if (ir & IR_TEFN) {
946 /* New TX FIFO Element arrived */
947 m_can_echo_tx_event(dev);
948 can_led_event(dev, CAN_LED_EVENT_TX);
949 if (netif_queue_stopped(dev) &&
441ac340 950 !m_can_tx_fifo_full(cdev))
10c1c397
MH
951 netif_wake_queue(dev);
952 }
e0d1f481
DA
953 }
954
955 return IRQ_HANDLED;
956}
957
b03cfc5b 958static const struct can_bittiming_const m_can_bittiming_const_30X = {
e0d1f481
DA
959 .name = KBUILD_MODNAME,
960 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
961 .tseg1_max = 64,
962 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
963 .tseg2_max = 16,
964 .sjw_max = 16,
965 .brp_min = 1,
966 .brp_max = 1024,
967 .brp_inc = 1,
968};
969
b03cfc5b 970static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
80646733
DA
971 .name = KBUILD_MODNAME,
972 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
973 .tseg1_max = 16,
974 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
975 .tseg2_max = 8,
976 .sjw_max = 4,
977 .brp_min = 1,
978 .brp_max = 32,
979 .brp_inc = 1,
980};
981
b03cfc5b
MH
982static const struct can_bittiming_const m_can_bittiming_const_31X = {
983 .name = KBUILD_MODNAME,
984 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
985 .tseg1_max = 256,
5518857e 986 .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
b03cfc5b
MH
987 .tseg2_max = 128,
988 .sjw_max = 128,
989 .brp_min = 1,
990 .brp_max = 512,
991 .brp_inc = 1,
992};
993
994static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
995 .name = KBUILD_MODNAME,
996 .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
997 .tseg1_max = 32,
998 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
999 .tseg2_max = 16,
1000 .sjw_max = 16,
1001 .brp_min = 1,
1002 .brp_max = 32,
1003 .brp_inc = 1,
1004};
1005
e0d1f481
DA
1006static int m_can_set_bittiming(struct net_device *dev)
1007{
441ac340
DM
1008 struct m_can_classdev *cdev = netdev_priv(dev);
1009 const struct can_bittiming *bt = &cdev->can.bittiming;
1010 const struct can_bittiming *dbt = &cdev->can.data_bittiming;
e0d1f481
DA
1011 u16 brp, sjw, tseg1, tseg2;
1012 u32 reg_btp;
1013
1014 brp = bt->brp - 1;
1015 sjw = bt->sjw - 1;
1016 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
1017 tseg2 = bt->phase_seg2 - 1;
5e1bd15a
MH
1018 reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
1019 (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
441ac340 1020 m_can_write(cdev, M_CAN_NBTP, reg_btp);
80646733 1021
441ac340 1022 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
e759c626 1023 reg_btp = 0;
80646733
DA
1024 brp = dbt->brp - 1;
1025 sjw = dbt->sjw - 1;
1026 tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
1027 tseg2 = dbt->phase_seg2 - 1;
e759c626
FCJ
1028
1029 /* TDC is only needed for bitrates beyond 2.5 MBit/s.
1030 * This is mentioned in the "Bit Time Requirements for CAN FD"
1031 * paper presented at the International CAN Conference 2013
1032 */
1033 if (dbt->bitrate > 2500000) {
1034 u32 tdco, ssp;
1035
1036 /* Use the same value of secondary sampling point
1037 * as the data sampling point
1038 */
1039 ssp = dbt->sample_point;
1040
1041 /* Equation based on Bosch's M_CAN User Manual's
1042 * Transmitter Delay Compensation Section
1043 */
441ac340 1044 tdco = (cdev->can.clock.freq / 1000) *
e759c626
FCJ
1045 ssp / dbt->bitrate;
1046
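			/* For example: a 40 MHz CAN clock, an 80.0% secondary
			 * sample point (ssp = 800) and a 4 Mbit/s data bitrate
			 * give tdco = 40000 * 800 / 4000000 = 8.
			 */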
1047 /* Max valid TDCO value is 127 */
1048 if (tdco > 127) {
1049 netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
1050 tdco);
1051 tdco = 127;
1052 }
1053
1054 reg_btp |= DBTP_TDC;
441ac340 1055 m_can_write(cdev, M_CAN_TDCR,
e759c626
FCJ
1056 tdco << TDCR_TDCO_SHIFT);
1057 }
1058
1059 reg_btp |= (brp << DBTP_DBRP_SHIFT) |
1060 (sjw << DBTP_DSJW_SHIFT) |
1061 (tseg1 << DBTP_DTSEG1_SHIFT) |
1062 (tseg2 << DBTP_DTSEG2_SHIFT);
1063
441ac340 1064 m_can_write(cdev, M_CAN_DBTP, reg_btp);
80646733 1065 }
e0d1f481
DA
1066
1067 return 0;
1068}
1069
1070/* Configure M_CAN chip:
1071 * - set rx buffer/fifo element size
1072 * - configure rx fifo
1073 * - accept non-matching frame into fifo 0
1074 * - configure tx buffer
428479e4 1075 * - >= v3.1.x: TX FIFO is used
e0d1f481
DA
1076 * - configure mode
1077 * - setup bittiming
1078 */
1079static void m_can_chip_config(struct net_device *dev)
1080{
441ac340 1081 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
1082 u32 cccr, test;
1083
441ac340 1084 m_can_config_endisable(cdev, true);
e0d1f481 1085
80646733 1086 /* RX Buffer/FIFO Element Size 64 bytes data field */
441ac340 1087 m_can_write(cdev, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
e0d1f481
DA
1088
1089 /* Accept Non-matching Frames Into FIFO 0 */
441ac340 1090 m_can_write(cdev, M_CAN_GFC, 0x0);
e0d1f481 1091
441ac340 1092 if (cdev->version == 30) {
428479e4 1093 /* only support one Tx Buffer currently */
441ac340
DM
1094 m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
1095 cdev->mcfg[MRAM_TXB].off);
428479e4
MH
1096 } else {
1097 /* TX FIFO is used for newer IP Core versions */
441ac340
DM
1098 m_can_write(cdev, M_CAN_TXBC,
1099 (cdev->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
1100 (cdev->mcfg[MRAM_TXB].off));
428479e4 1101 }
e0d1f481 1102
80646733 1103 /* support 64 bytes payload */
441ac340 1104 m_can_write(cdev, M_CAN_TXESC, TXESC_TBDS_64BYTES);
e0d1f481 1105
428479e4 1106 /* TX Event FIFO */
441ac340
DM
1107 if (cdev->version == 30) {
1108 m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
1109 cdev->mcfg[MRAM_TXE].off);
428479e4
MH
1110 } else {
1111 /* Full TX Event FIFO is used */
441ac340
DM
1112 m_can_write(cdev, M_CAN_TXEFC,
1113 ((cdev->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
428479e4 1114 & TXEFC_EFS_MASK) |
441ac340 1115 cdev->mcfg[MRAM_TXE].off);
428479e4 1116 }
e0d1f481
DA
1117
1118 /* rx fifo configuration, blocking mode, fifo size 1 */
441ac340
DM
1119 m_can_write(cdev, M_CAN_RXF0C,
1120 (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
1121 cdev->mcfg[MRAM_RXF0].off);
e0d1f481 1122
441ac340
DM
1123 m_can_write(cdev, M_CAN_RXF1C,
1124 (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
1125 cdev->mcfg[MRAM_RXF1].off);
e0d1f481 1126
441ac340
DM
1127 cccr = m_can_read(cdev, M_CAN_CCCR);
1128 test = m_can_read(cdev, M_CAN_TEST);
e0d1f481 1129 test &= ~TEST_LBCK;
441ac340 1130 if (cdev->version == 30) {
b03cfc5b 1131 /* Version 3.0.x */
e0d1f481 1132
b03cfc5b
MH
1133 cccr &= ~(CCCR_TEST | CCCR_MON |
1134 (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
1135 (CCCR_CME_MASK << CCCR_CME_SHIFT));
1136
441ac340 1137 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
b03cfc5b
MH
1138 cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
1139
1140 } else {
1141 /* Version 3.1.x or 3.2.x */
393753b2
RF
1142 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
1143 CCCR_NISO);
b03cfc5b
MH
1144
1145 /* Only 3.2.x has NISO Bit implemented */
441ac340 1146 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
b03cfc5b
MH
1147 cccr |= CCCR_NISO;
1148
441ac340 1149 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
b03cfc5b
MH
1150 cccr |= (CCCR_BRSE | CCCR_FDOE);
1151 }
e0d1f481 1152
b03cfc5b 1153 /* Loopback Mode */
441ac340 1154 if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
b03cfc5b 1155 cccr |= CCCR_TEST | CCCR_MON;
e0d1f481
DA
1156 test |= TEST_LBCK;
1157 }
1158
b03cfc5b 1159 /* Enable Monitoring (all versions) */
441ac340 1160 if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
b03cfc5b 1161 cccr |= CCCR_MON;
80646733 1162
b03cfc5b 1163 /* Write config */
441ac340
DM
1164 m_can_write(cdev, M_CAN_CCCR, cccr);
1165 m_can_write(cdev, M_CAN_TEST, test);
e0d1f481 1166
b03cfc5b 1167 /* Enable interrupts */
441ac340
DM
1168 m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
1169 if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
1170 if (cdev->version == 30)
1171 m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
b03cfc5b
MH
1172 ~(IR_ERR_LEC_30X));
1173 else
441ac340 1174 m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
b03cfc5b 1175 ~(IR_ERR_LEC_31X));
e0d1f481 1176 else
441ac340 1177 m_can_write(cdev, M_CAN_IE, IR_ALL_INT);
e0d1f481
DA
1178
1179 /* route all interrupts to INT0 */
441ac340 1180 m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
e0d1f481
DA
1181
1182 /* set bittiming params */
1183 m_can_set_bittiming(dev);
1184
441ac340 1185 m_can_config_endisable(cdev, false);
f524f829 1186
441ac340
DM
1187 if (cdev->ops->init)
1188 cdev->ops->init(cdev);
e0d1f481
DA
1189}
1190
1191static void m_can_start(struct net_device *dev)
1192{
441ac340 1193 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
1194
1195 /* basic m_can configuration */
1196 m_can_chip_config(dev);
1197
441ac340 1198 cdev->can.state = CAN_STATE_ERROR_ACTIVE;
e0d1f481 1199
441ac340 1200 m_can_enable_all_interrupts(cdev);
e0d1f481
DA
1201}
1202
1203static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
1204{
1205 switch (mode) {
1206 case CAN_MODE_START:
f524f829 1207 m_can_clean(dev);
e0d1f481
DA
1208 m_can_start(dev);
1209 netif_wake_queue(dev);
1210 break;
1211 default:
1212 return -EOPNOTSUPP;
1213 }
1214
1215 return 0;
1216}
1217
b03cfc5b
MH
1218/* Checks core release number of M_CAN
1219 * returns 0 if an unsupported device is detected
1220 * else it returns the release and step coded as:
1221 * return value = 10 * <release> + 1 * <step>
1222 */
441ac340 1223static int m_can_check_core_release(struct m_can_classdev *cdev)
b03cfc5b
MH
1224{
1225 u32 crel_reg;
1226 u8 rel;
1227 u8 step;
1228 int res;
b03cfc5b
MH
1229
1230 /* Read Core Release Version and split into version number
1231 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
1232 */
441ac340 1233 crel_reg = m_can_read(cdev, M_CAN_CREL);
b03cfc5b
MH
1234 rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
1235 step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);
1236
1237 if (rel == 3) {
1238 /* M_CAN v3.x.y: create return value */
1239 res = 30 + step;
1240 } else {
1241 /* Unsupported M_CAN version */
1242 res = 0;
1243 }
1244
1245 return res;
1246}
1247
1248/* Selectable Non ISO support only in version 3.2.x
1249 * This function checks if the bit is writable.
1250 */
441ac340 1251static bool m_can_niso_supported(struct m_can_classdev *cdev)
b03cfc5b 1252{
f524f829
DM
1253 u32 cccr_reg, cccr_poll = 0;
1254 int niso_timeout = -ETIMEDOUT;
1255 int i;
b03cfc5b 1256
441ac340
DM
1257 m_can_config_endisable(cdev, true);
1258 cccr_reg = m_can_read(cdev, M_CAN_CCCR);
b03cfc5b 1259 cccr_reg |= CCCR_NISO;
441ac340 1260 m_can_write(cdev, M_CAN_CCCR, cccr_reg);
b03cfc5b 1261
f524f829 1262 for (i = 0; i <= 10; i++) {
441ac340 1263 cccr_poll = m_can_read(cdev, M_CAN_CCCR);
f524f829
DM
1264 if (cccr_poll == cccr_reg) {
1265 niso_timeout = 0;
1266 break;
1267 }
1268
1269 usleep_range(1, 5);
1270 }
b03cfc5b
MH
1271
1272 /* Clear NISO */
1273 cccr_reg &= ~(CCCR_NISO);
441ac340 1274 m_can_write(cdev, M_CAN_CCCR, cccr_reg);
b03cfc5b 1275
441ac340 1276 m_can_config_endisable(cdev, false);
b03cfc5b
MH
1277
1278 /* return false if time out (-ETIMEDOUT), else return true */
1279 return !niso_timeout;
1280}
1281
441ac340 1282static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
e0d1f481 1283{
f524f829 1284 struct net_device *dev = m_can_dev->net;
b03cfc5b 1285 int m_can_version;
b03cfc5b 1286
f524f829 1287 m_can_version = m_can_check_core_release(m_can_dev);
b03cfc5b
MH
1288 /* return if unsupported version */
1289 if (!m_can_version) {
f524f829 1290 dev_err(m_can_dev->dev, "Unsupported version number: %2d",
5e520edd
FA
1291 m_can_version);
1292 return -EINVAL;
b03cfc5b 1293 }
e0d1f481 1294
f524f829
DM
1295 if (!m_can_dev->is_peripheral)
1296 netif_napi_add(dev, &m_can_dev->napi,
1297 m_can_poll, M_CAN_NAPI_WEIGHT);
e0d1f481 1298
b03cfc5b 1299 /* Shared properties of all M_CAN versions */
f524f829
DM
1300 m_can_dev->version = m_can_version;
1301 m_can_dev->can.do_set_mode = m_can_set_mode;
1302 m_can_dev->can.do_get_berr_counter = m_can_get_berr_counter;
6cfda7fb 1303
b03cfc5b 1304 /* Set M_CAN supported operations */
f524f829 1305 m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
e0d1f481 1306 CAN_CTRLMODE_LISTENONLY |
80646733
DA
1307 CAN_CTRLMODE_BERR_REPORTING |
1308 CAN_CTRLMODE_FD;
e0d1f481 1309
b03cfc5b 1310 /* Set properties depending on M_CAN version */
f524f829 1311 switch (m_can_dev->version) {
b03cfc5b
MH
1312 case 30:
1313 /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
1314 can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
f524f829
DM
1315 m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
1316 m_can_dev->bit_timing : &m_can_bittiming_const_30X;
1317
1318 m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
1319 m_can_dev->data_timing :
1320 &m_can_data_bittiming_const_30X;
b03cfc5b
MH
1321 break;
1322 case 31:
1323 /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
1324 can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
f524f829
DM
1325 m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
1326 m_can_dev->bit_timing : &m_can_bittiming_const_31X;
1327
1328 m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
1329 m_can_dev->data_timing :
1330 &m_can_data_bittiming_const_31X;
b03cfc5b
MH
1331 break;
1332 case 32:
9b5b1ddf
PS
1333 case 33:
1334 /* Support both MCAN version v3.2.x and v3.3.0 */
f524f829
DM
1335 m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
1336 m_can_dev->bit_timing : &m_can_bittiming_const_31X;
1337
1338 m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
1339 m_can_dev->data_timing :
1340 &m_can_data_bittiming_const_31X;
1341
1342 m_can_dev->can.ctrlmode_supported |=
1343 (m_can_niso_supported(m_can_dev)
b03cfc5b
MH
1344 ? CAN_CTRLMODE_FD_NON_ISO
1345 : 0);
1346 break;
1347 default:
f524f829
DM
1348 dev_err(m_can_dev->dev, "Unsupported version number: %2d",
1349 m_can_dev->version);
5e520edd 1350 return -EINVAL;
b03cfc5b
MH
1351 }
1352
f524f829
DM
1353 if (m_can_dev->ops->init)
1354 m_can_dev->ops->init(m_can_dev);
e0d1f481
DA
1355
1356 return 0;
e0d1f481
DA
1357}
1358
1359static void m_can_stop(struct net_device *dev)
1360{
441ac340 1361 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
1362
1363 /* disable all interrupts */
441ac340 1364 m_can_disable_all_interrupts(cdev);
e0d1f481 1365
4b068abc
FA
1366 /* Set init mode to disengage from the network */
1367 m_can_config_endisable(cdev, true);
1368
e0d1f481 1369 /* set the state as STOPPED */
441ac340 1370 cdev->can.state = CAN_STATE_STOPPED;
e0d1f481
DA
1371}
1372
1373static int m_can_close(struct net_device *dev)
1374{
441ac340 1375 struct m_can_classdev *cdev = netdev_priv(dev);
e0d1f481
DA
1376
1377 netif_stop_queue(dev);
441ac340
DM
1378
1379 if (!cdev->is_peripheral)
1380 napi_disable(&cdev->napi);
1381
e0d1f481 1382 m_can_stop(dev);
441ac340 1383 m_can_clk_stop(cdev);
e0d1f481 1384 free_irq(dev->irq, dev);
f524f829 1385
441ac340
DM
1386 if (cdev->is_peripheral) {
1387 cdev->tx_skb = NULL;
1388 destroy_workqueue(cdev->tx_wq);
1389 cdev->tx_wq = NULL;
f524f829
DM
1390 }
1391
e0d1f481
DA
1392 close_candev(dev);
1393 can_led_event(dev, CAN_LED_EVENT_STOP);
1394
1395 return 0;
1396}
1397
static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	/* get wrap around for loopback skb index */
	unsigned int wrap = cdev->can.echo_skb_max;
	int next_idx;

	/* calculate next index */
	next_idx = (++putidx >= wrap ? 0 : putidx);

	/* check if occupied */
	return !!cdev->can.echo_skb[next_idx];
}

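/* Queue one frame for transmission.  v3.0.x cores use the single dedicated
 * Tx buffer (index 0); newer cores use the Tx FIFO and store the put index
 * as message marker so the echo skb can be matched from the Tx event FIFO.
 */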
441ac340 1412static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
e0d1f481 1413{
441ac340
DM
1414 struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
1415 struct net_device *dev = cdev->net;
1416 struct sk_buff *skb = cdev->tx_skb;
10c1c397 1417 u32 id, cccr, fdflags;
80646733 1418 int i;
10c1c397 1419 int putidx;
e0d1f481 1420
10c1c397
MH
1421 /* Generate ID field for TX buffer Element */
1422 /* Common to all supported M_CAN versions */
e0d1f481
DA
1423 if (cf->can_id & CAN_EFF_FLAG) {
1424 id = cf->can_id & CAN_EFF_MASK;
1425 id |= TX_BUF_XTD;
1426 } else {
1427 id = ((cf->can_id & CAN_SFF_MASK) << 18);
1428 }
1429
1430 if (cf->can_id & CAN_RTR_FLAG)
1431 id |= TX_BUF_RTR;
1432
441ac340 1433 if (cdev->version == 30) {
10c1c397
MH
1434 netif_stop_queue(dev);
1435
1436 /* message ram configuration */
441ac340
DM
1437 m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id);
1438 m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC,
10c1c397 1439 can_len2dlc(cf->len) << 16);
80646733 1440
10c1c397 1441 for (i = 0; i < cf->len; i += 4)
441ac340 1442 m_can_fifo_write(cdev, 0,
10c1c397
MH
1443 M_CAN_FIFO_DATA(i / 4),
1444 *(u32 *)(cf->data + i));
1445
1446 can_put_echo_skb(skb, dev, 0);
1447
441ac340
DM
1448 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
1449 cccr = m_can_read(cdev, M_CAN_CCCR);
10c1c397
MH
1450 cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
1451 if (can_is_canfd_skb(skb)) {
1452 if (cf->flags & CANFD_BRS)
1453 cccr |= CCCR_CMR_CANFD_BRS <<
1454 CCCR_CMR_SHIFT;
1455 else
1456 cccr |= CCCR_CMR_CANFD <<
1457 CCCR_CMR_SHIFT;
1458 } else {
1459 cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
1460 }
441ac340 1461 m_can_write(cdev, M_CAN_CCCR, cccr);
10c1c397 1462 }
441ac340
DM
1463 m_can_write(cdev, M_CAN_TXBTIE, 0x1);
1464 m_can_write(cdev, M_CAN_TXBAR, 0x1);
10c1c397
MH
1465 /* End of xmit function for version 3.0.x */
1466 } else {
1467 /* Transmit routine for version >= v3.1.x */
1468
1469 /* Check if FIFO full */
441ac340 1470 if (m_can_tx_fifo_full(cdev)) {
10c1c397
MH
1471 /* This shouldn't happen */
1472 netif_stop_queue(dev);
1473 netdev_warn(dev,
1474 "TX queue active although FIFO is full.");
441ac340
DM
1475
1476 if (cdev->is_peripheral) {
f524f829
DM
1477 kfree_skb(skb);
1478 dev->stats.tx_dropped++;
1479 return NETDEV_TX_OK;
1480 } else {
1481 return NETDEV_TX_BUSY;
1482 }
10c1c397 1483 }
80646733 1484
10c1c397 1485 /* get put index for frame */
441ac340 1486 putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
10c1c397
MH
1487 >> TXFQS_TFQPI_SHIFT);
1488 /* Write ID Field to FIFO Element */
441ac340 1489 m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id);
e0d1f481 1490
10c1c397
MH
1491 /* get CAN FD configuration of frame */
1492 fdflags = 0;
80646733 1493 if (can_is_canfd_skb(skb)) {
10c1c397 1494 fdflags |= TX_BUF_FDF;
80646733 1495 if (cf->flags & CANFD_BRS)
10c1c397 1496 fdflags |= TX_BUF_BRS;
80646733 1497 }
80646733 1498
10c1c397
MH
1499 /* Construct DLC Field. Also contains CAN-FD configuration
1500 * use put index of fifo as message marker
1501 * it is used in TX interrupt for
1502 * sending the correct echo frame
1503 */
441ac340 1504 m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
10c1c397
MH
1505 ((putidx << TX_BUF_MM_SHIFT) &
1506 TX_BUF_MM_MASK) |
1507 (can_len2dlc(cf->len) << 16) |
1508 fdflags | TX_BUF_EFC);
1509
1510 for (i = 0; i < cf->len; i += 4)
441ac340 1511 m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA(i / 4),
10c1c397
MH
1512 *(u32 *)(cf->data + i));
1513
1514 /* Push loopback echo.
1515 * Will be looped back on TX interrupt based on message marker
1516 */
1517 can_put_echo_skb(skb, dev, putidx);
1518
1519 /* Enable TX FIFO element to start transfer */
441ac340 1520 m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
10c1c397
MH
1521
1522 /* stop network queue if fifo full */
441ac340 1523 if (m_can_tx_fifo_full(cdev) ||
f524f829
DM
1524 m_can_next_echo_skb_occupied(dev, putidx))
1525 netif_stop_queue(dev);
10c1c397 1526 }
e0d1f481
DA
1527
1528 return NETDEV_TX_OK;
1529}
1530
f524f829
DM
1531static void m_can_tx_work_queue(struct work_struct *ws)
1532{
441ac340 1533 struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
f524f829 1534 tx_work);
441ac340
DM
1535
1536 m_can_tx_handler(cdev);
1537 cdev->tx_skb = NULL;
f524f829
DM
1538}
1539
1540static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
1541 struct net_device *dev)
1542{
441ac340 1543 struct m_can_classdev *cdev = netdev_priv(dev);
f524f829
DM
1544
1545 if (can_dropped_invalid_skb(dev, skb))
1546 return NETDEV_TX_OK;
1547
441ac340
DM
1548 if (cdev->is_peripheral) {
1549 if (cdev->tx_skb) {
f524f829
DM
1550 netdev_err(dev, "hard_xmit called while tx busy\n");
1551 return NETDEV_TX_BUSY;
1552 }
1553
441ac340 1554 if (cdev->can.state == CAN_STATE_BUS_OFF) {
f524f829
DM
1555 m_can_clean(dev);
1556 } else {
1557 /* Need to stop the queue to avoid numerous requests
1558 * from being sent. Suggested improvement is to create
1559 * a queueing mechanism that will queue the skbs and
1560 * process them in order.
1561 */
441ac340
DM
1562 cdev->tx_skb = skb;
1563 netif_stop_queue(cdev->net);
1564 queue_work(cdev->tx_wq, &cdev->tx_work);
f524f829
DM
1565 }
1566 } else {
441ac340
DM
1567 cdev->tx_skb = skb;
1568 return m_can_tx_handler(cdev);
f524f829
DM
1569 }
1570
1571 return NETDEV_TX_OK;
1572}
1573
1574static int m_can_open(struct net_device *dev)
1575{
441ac340 1576 struct m_can_classdev *cdev = netdev_priv(dev);
f524f829
DM
1577 int err;
1578
441ac340 1579 err = m_can_clk_start(cdev);
f524f829
DM
1580 if (err)
1581 return err;
1582
1583 /* open the can device */
1584 err = open_candev(dev);
1585 if (err) {
1586 netdev_err(dev, "failed to open can device\n");
1587 goto exit_disable_clks;
1588 }
1589
1590 /* register interrupt handler */
441ac340
DM
1591 if (cdev->is_peripheral) {
1592 cdev->tx_skb = NULL;
1593 cdev->tx_wq = alloc_workqueue("mcan_wq",
f524f829 1594 WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
441ac340 1595 if (!cdev->tx_wq) {
f524f829
DM
1596 err = -ENOMEM;
1597 goto out_wq_fail;
1598 }
1599
441ac340 1600 INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
f524f829
DM
1601
1602 err = request_threaded_irq(dev->irq, NULL, m_can_isr,
b517768e 1603 IRQF_ONESHOT,
f524f829
DM
1604 dev->name, dev);
1605 } else {
1606 err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
1607 dev);
1608 }
1609
1610 if (err < 0) {
1611 netdev_err(dev, "failed to request interrupt\n");
1612 goto exit_irq_fail;
1613 }
1614
1615 /* start the m_can controller */
1616 m_can_start(dev);
1617
1618 can_led_event(dev, CAN_LED_EVENT_OPEN);
1619
441ac340
DM
1620 if (!cdev->is_peripheral)
1621 napi_enable(&cdev->napi);
f524f829
DM
1622
1623 netif_start_queue(dev);
1624
1625 return 0;
1626
1627exit_irq_fail:
441ac340
DM
1628 if (cdev->is_peripheral)
1629 destroy_workqueue(cdev->tx_wq);
f524f829
DM
1630out_wq_fail:
1631 close_candev(dev);
1632exit_disable_clks:
441ac340 1633 m_can_clk_stop(cdev);
f524f829
DM
1634 return err;
1635}
1636
e0d1f481
DA
1637static const struct net_device_ops m_can_netdev_ops = {
1638 .ndo_open = m_can_open,
1639 .ndo_stop = m_can_close,
1640 .ndo_start_xmit = m_can_start_xmit,
d6fdb38b 1641 .ndo_change_mtu = can_change_mtu,
e0d1f481
DA
1642};
1643
1644static int register_m_can_dev(struct net_device *dev)
1645{
1646 dev->flags |= IFF_ECHO; /* we support local echo */
1647 dev->netdev_ops = &m_can_netdev_ops;
1648
1649 return register_candev(dev);
1650}
1651
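/* The "bosch,mram-cfg" values describe the Message RAM as consecutive
 * regions in the order SIDF, XIDF, RXF0, RXF1, RXB, TXE, TXB: each region
 * starts where the previous one ends, beginning at the offset given in
 * mram_config_vals[0].
 */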
441ac340 1652static void m_can_of_parse_mram(struct m_can_classdev *cdev,
b03cfc5b 1653 const u32 *mram_config_vals)
e0d1f481 1654{
441ac340
DM
1655 cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
1656 cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
1657 cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
1658 cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
1659 cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
1660 cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
1661 cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
1662 cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
5e1bd15a 1663 (RXFC_FS_MASK >> RXFC_FS_SHIFT);
441ac340
DM
1664 cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
1665 cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
1666 cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
5e1bd15a 1667 (RXFC_FS_MASK >> RXFC_FS_SHIFT);
441ac340
DM
1668 cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
1669 cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
1670 cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
1671 cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
1672 cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
1673 cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
1674 cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
1675 cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
1676 cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
5e1bd15a 1677 (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);
e0d1f481 1678
441ac340 1679 dev_dbg(cdev->dev,
f524f829 1680 "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
441ac340
DM
1681 cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
1682 cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
1683 cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
1684 cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
1685 cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
1686 cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
1687 cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
e0d1f481
DA
1688}
1689
441ac340 1690void m_can_init_ram(struct m_can_classdev *cdev)
e0d1f481 1691{
f524f829 1692 int end, i, start;
e0d1f481 1693
f524f829
DM
1694 /* initialize the entire Message RAM in use to avoid possible
1695 * ECC/parity checksum errors when reading an uninitialized buffer
1696 */
441ac340
DM
1697 start = cdev->mcfg[MRAM_SIDF].off;
1698 end = cdev->mcfg[MRAM_TXB].off +
1699 cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
b03cfc5b 1700
f524f829 1701 for (i = start; i < end; i += 4)
441ac340 1702 m_can_fifo_write_no_off(cdev, i, 0x0);
f524f829
DM
1703}
1704EXPORT_SYMBOL_GPL(m_can_init_ram);
e0d1f481 1705
441ac340 1706int m_can_class_get_clocks(struct m_can_classdev *m_can_dev)
f524f829
DM
1707{
1708 int ret = 0;
e0d1f481 1709
f524f829
DM
1710 m_can_dev->hclk = devm_clk_get(m_can_dev->dev, "hclk");
1711 m_can_dev->cclk = devm_clk_get(m_can_dev->dev, "cclk");
e0d1f481 1712
f524f829
DM
1713 if (IS_ERR(m_can_dev->cclk)) {
1714 dev_err(m_can_dev->dev, "no clock found\n");
b03cfc5b 1715 ret = -ENODEV;
b03cfc5b
MH
1716 }
1717
f524f829
DM
1718 return ret;
1719}
1720EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
b03cfc5b 1721
441ac340 1722struct m_can_classdev *m_can_class_allocate_dev(struct device *dev)
f524f829 1723{
441ac340 1724 struct m_can_classdev *class_dev = NULL;
f524f829
DM
1725 u32 mram_config_vals[MRAM_CFG_LEN];
1726 struct net_device *net_dev;
1727 u32 tx_fifo_size;
1728 int ret;
1729
1730 ret = fwnode_property_read_u32_array(dev_fwnode(dev),
1731 "bosch,mram-cfg",
1732 mram_config_vals,
1733 sizeof(mram_config_vals) / 4);
b03cfc5b 1734 if (ret) {
f524f829
DM
1735 dev_err(dev, "Could not get Message RAM configuration.");
1736 goto out;
b03cfc5b
MH
1737 }
1738
1739 /* Get TX FIFO size
1740 * Defines the total amount of echo buffers for loopback
1741 */
1742 tx_fifo_size = mram_config_vals[7];
1743
1744 /* allocate the m_can device */
f524f829
DM
1745 net_dev = alloc_candev(sizeof(*class_dev), tx_fifo_size);
1746 if (!net_dev) {
1747 dev_err(dev, "Failed to allocate CAN device");
1748 goto out;
b03cfc5b 1749 }
5e520edd 1750
f524f829
DM
1751 class_dev = netdev_priv(net_dev);
1752 if (!class_dev) {
		dev_err(dev, "Failed to init netdev private data");
f524f829
DM
1754 goto out;
1755 }
e0d1f481 1756
f524f829
DM
1757 class_dev->net = net_dev;
1758 class_dev->dev = dev;
1759 SET_NETDEV_DEV(net_dev, dev);
e0d1f481 1760
f524f829
DM
1761 m_can_of_parse_mram(class_dev, mram_config_vals);
1762out:
1763 return class_dev;
1764}
1765EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
1766
0d608ce8
DM
1767void m_can_class_free_dev(struct net_device *net)
1768{
1769 free_candev(net);
1770}
1771EXPORT_SYMBOL_GPL(m_can_class_free_dev);
1772
441ac340 1773int m_can_class_register(struct m_can_classdev *m_can_dev)
f524f829
DM
1774{
1775 int ret;
cdf8259d 1776
f524f829
DM
1777 if (m_can_dev->pm_clock_support) {
1778 pm_runtime_enable(m_can_dev->dev);
1779 ret = m_can_clk_start(m_can_dev);
1780 if (ret)
1781 goto pm_runtime_fail;
1782 }
1783
1784 ret = m_can_dev_setup(m_can_dev);
cdf8259d
FA
1785 if (ret)
1786 goto clk_disable;
1787
f524f829 1788 ret = register_m_can_dev(m_can_dev->net);
e0d1f481 1789 if (ret) {
f524f829
DM
1790 dev_err(m_can_dev->dev, "registering %s failed (err=%d)\n",
1791 m_can_dev->net->name, ret);
cdf8259d 1792 goto clk_disable;
e0d1f481
DA
1793 }
1794
f524f829 1795 devm_can_led_init(m_can_dev->net);
e0d1f481 1796
f524f829 1797 of_can_transceiver(m_can_dev->net);
31643dc8 1798
f524f829
DM
1799 dev_info(m_can_dev->dev, "%s device registered (irq=%d, version=%d)\n",
1800 KBUILD_MODNAME, m_can_dev->net->irq, m_can_dev->version);
e0d1f481 1801
b03cfc5b
MH
1802 /* Probe finished
1803 * Stop clocks. They will be reactivated once the M_CAN device is opened
1804 */
cdf8259d 1805clk_disable:
f524f829 1806 m_can_clk_stop(m_can_dev);
cdf8259d
FA
1807pm_runtime_fail:
1808 if (ret) {
f524f829
DM
1809 if (m_can_dev->pm_clock_support)
1810 pm_runtime_disable(m_can_dev->dev);
1811 free_candev(m_can_dev->net);
cdf8259d 1812 }
f524f829 1813
e0d1f481
DA
1814 return ret;
1815}
f524f829 1816EXPORT_SYMBOL_GPL(m_can_class_register);
e0d1f481 1817
f524f829 1818int m_can_class_suspend(struct device *dev)
e0d1f481
DA
1819{
1820 struct net_device *ndev = dev_get_drvdata(dev);
441ac340 1821 struct m_can_classdev *cdev = netdev_priv(ndev);
e0d1f481
DA
1822
1823 if (netif_running(ndev)) {
1824 netif_stop_queue(ndev);
1825 netif_device_detach(ndev);
d14ccea0 1826 m_can_stop(ndev);
441ac340 1827 m_can_clk_stop(cdev);
e0d1f481
DA
1828 }
1829
c9b3bce1
BH
1830 pinctrl_pm_select_sleep_state(dev);
1831
441ac340 1832 cdev->can.state = CAN_STATE_SLEEPING;
e0d1f481
DA
1833
1834 return 0;
1835}
f524f829 1836EXPORT_SYMBOL_GPL(m_can_class_suspend);
e0d1f481 1837
f524f829 1838int m_can_class_resume(struct device *dev)
e0d1f481
DA
1839{
1840 struct net_device *ndev = dev_get_drvdata(dev);
441ac340 1841 struct m_can_classdev *cdev = netdev_priv(ndev);
e0d1f481 1842
c9b3bce1
BH
1843 pinctrl_pm_select_default_state(dev);
1844
441ac340 1845 cdev->can.state = CAN_STATE_ERROR_ACTIVE;
e0d1f481
DA
1846
1847 if (netif_running(ndev)) {
d14ccea0
QS
1848 int ret;
1849
441ac340 1850 ret = m_can_clk_start(cdev);
d14ccea0
QS
1851 if (ret)
1852 return ret;
1853
441ac340 1854 m_can_init_ram(cdev);
d14ccea0 1855 m_can_start(ndev);
e0d1f481
DA
1856 netif_device_attach(ndev);
1857 netif_start_queue(ndev);
1858 }
1859
1860 return 0;
1861}
f524f829 1862EXPORT_SYMBOL_GPL(m_can_class_resume);
e0d1f481 1863
441ac340 1864void m_can_class_unregister(struct m_can_classdev *m_can_dev)
e0d1f481 1865{
f524f829 1866 unregister_candev(m_can_dev->net);
e0d1f481 1867
f524f829 1868 free_candev(m_can_dev->net);
cdf8259d 1869}
f524f829 1870EXPORT_SYMBOL_GPL(m_can_class_unregister);
e0d1f481
DA
1871
1872MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
f524f829 1873MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
e0d1f481
DA
1874MODULE_LICENSE("GPL v2");
1875MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");