1 /*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
13 * written by:
14 * Copyright
15 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
16 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
17 *
18 * The Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
19 * Bosch C_CAN user manual can be obtained from:
20 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
21 * users_manual_c_can.pdf
22 *
23 * This file is licensed under the terms of the GNU General Public
24 * License version 2. This program is licensed "as is" without any
25 * warranty of any kind, whether express or implied.
26 */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/interrupt.h>
31 #include <linux/delay.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_arp.h>
34 #include <linux/if_ether.h>
35 #include <linux/list.h>
36 #include <linux/io.h>
37 #include <linux/pm_runtime.h>
38
39 #include <linux/can.h>
40 #include <linux/can/dev.h>
41 #include <linux/can/error.h>
42 #include <linux/can/led.h>
43
44 #include "c_can.h"
45
46 /* Number of interface registers */
47 #define IF_ENUM_REG_LEN 11
48 #define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
49
50 /* control extension register D_CAN specific */
51 #define CONTROL_EX_PDR BIT(8)
52
53 /* control register */
54 #define CONTROL_TEST BIT(7)
55 #define CONTROL_CCE BIT(6)
56 #define CONTROL_DISABLE_AR BIT(5)
57 #define CONTROL_ENABLE_AR (0 << 5)
58 #define CONTROL_EIE BIT(3)
59 #define CONTROL_SIE BIT(2)
60 #define CONTROL_IE BIT(1)
61 #define CONTROL_INIT BIT(0)
62
63 /* test register */
64 #define TEST_RX BIT(7)
65 #define TEST_TX1 BIT(6)
66 #define TEST_TX2 BIT(5)
67 #define TEST_LBACK BIT(4)
68 #define TEST_SILENT BIT(3)
69 #define TEST_BASIC BIT(2)
70
71 /* status register */
72 #define STATUS_PDA BIT(10)
73 #define STATUS_BOFF BIT(7)
74 #define STATUS_EWARN BIT(6)
75 #define STATUS_EPASS BIT(5)
76 #define STATUS_RXOK BIT(4)
77 #define STATUS_TXOK BIT(3)
78
79 /* error counter register */
80 #define ERR_CNT_TEC_MASK 0xff
81 #define ERR_CNT_TEC_SHIFT 0
82 #define ERR_CNT_REC_SHIFT 8
83 #define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
84 #define ERR_CNT_RP_SHIFT 15
85 #define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
86
87 /* bit-timing register */
88 #define BTR_BRP_MASK 0x3f
89 #define BTR_BRP_SHIFT 0
90 #define BTR_SJW_SHIFT 6
91 #define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
92 #define BTR_TSEG1_SHIFT 8
93 #define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
94 #define BTR_TSEG2_SHIFT 12
95 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
96
97 /* brp extension register */
98 #define BRP_EXT_BRPE_MASK 0x0f
99 #define BRP_EXT_BRPE_SHIFT 0
100
101 /* IFx command request */
102 #define IF_COMR_BUSY BIT(15)
103
104 /* IFx command mask */
105 #define IF_COMM_WR BIT(7)
106 #define IF_COMM_MASK BIT(6)
107 #define IF_COMM_ARB BIT(5)
108 #define IF_COMM_CONTROL BIT(4)
109 #define IF_COMM_CLR_INT_PND BIT(3)
110 #define IF_COMM_TXRQST BIT(2)
111 #define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST
112 #define IF_COMM_DATAA BIT(1)
113 #define IF_COMM_DATAB BIT(0)
114 #define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
115 IF_COMM_CONTROL | IF_COMM_TXRQST | \
116 IF_COMM_DATAA | IF_COMM_DATAB)
117
118 /* For the low buffers we clear the interrupt bit, but keep newdat */
119 #define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
120 IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
121 IF_COMM_DATAA | IF_COMM_DATAB)
122
123 /* For the high buffers we clear the interrupt bit and newdat */
124 #define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
125
126 /* IFx arbitration */
127 #define IF_ARB_MSGVAL BIT(15)
128 #define IF_ARB_MSGXTD BIT(14)
129 #define IF_ARB_TRANSMIT BIT(13)
130
131 /* IFx message control */
132 #define IF_MCONT_NEWDAT BIT(15)
133 #define IF_MCONT_MSGLST BIT(14)
134 #define IF_MCONT_INTPND BIT(13)
135 #define IF_MCONT_UMASK BIT(12)
136 #define IF_MCONT_TXIE BIT(11)
137 #define IF_MCONT_RXIE BIT(10)
138 #define IF_MCONT_RMTEN BIT(9)
139 #define IF_MCONT_TXRQST BIT(8)
140 #define IF_MCONT_EOB BIT(7)
141 #define IF_MCONT_DLC_MASK 0xf
142
143 /*
144 * Use IF1 for RX and IF2 for TX
145 */
146 #define IF_RX 0
147 #define IF_TX 1
148
149 /* status interrupt */
150 #define STATUS_INTERRUPT 0x8000
151
152 /* global interrupt masks */
153 #define ENABLE_ALL_INTERRUPTS 1
154 #define DISABLE_ALL_INTERRUPTS 0
155
156 /* minimum timeout for checking BUSY status */
157 #define MIN_TIMEOUT_VALUE 6
158
159 /* Wait for ~1 sec for INIT bit */
160 #define INIT_WAIT_MS 1000
161
162 /* napi related */
163 #define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
164
165 /* c_can lec values */
166 enum c_can_lec_type {
167 LEC_NO_ERROR = 0,
168 LEC_STUFF_ERROR,
169 LEC_FORM_ERROR,
170 LEC_ACK_ERROR,
171 LEC_BIT1_ERROR,
172 LEC_BIT0_ERROR,
173 LEC_CRC_ERROR,
174 LEC_UNUSED,
175 LEC_MASK = LEC_UNUSED,
176 };
177
178 /*
179 * c_can error types:
180 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
181 */
182 enum c_can_bus_error_types {
183 C_CAN_NO_ERROR = 0,
184 C_CAN_BUS_OFF,
185 C_CAN_ERROR_WARNING,
186 C_CAN_ERROR_PASSIVE,
187 };
188
189 static const struct can_bittiming_const c_can_bittiming_const = {
190 .name = KBUILD_MODNAME,
191 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
192 .tseg1_max = 16,
193 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
194 .tseg2_max = 8,
195 .sjw_max = 4,
196 .brp_min = 1,
197 .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field */
198 .brp_inc = 1,
199 };
200
201 static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
202 {
203 if (priv->device)
204 pm_runtime_enable(priv->device);
205 }
206
207 static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
208 {
209 if (priv->device)
210 pm_runtime_disable(priv->device);
211 }
212
213 static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
214 {
215 if (priv->device)
216 pm_runtime_get_sync(priv->device);
217 }
218
219 static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
220 {
221 if (priv->device)
222 pm_runtime_put_sync(priv->device);
223 }
224
225 static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
226 {
227 if (priv->raminit)
228 priv->raminit(priv, enable);
229 }
230
231 static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
232 {
233 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
234 C_CAN_MSG_OBJ_TX_FIRST;
235 }
236
237 static inline int get_tx_echo_msg_obj(int txecho)
238 {
239 return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
240 }
241
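/*
 * The core only exposes 16-bit registers, so a 32-bit value is read as
 * two consecutive registers: 'index' holds the low half and 'index + 1'
 * the high half.
 */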
242 static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
243 {
244 u32 val = priv->read_reg(priv, index);
245 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
246 return val;
247 }
248
249 static void c_can_enable_all_interrupts(struct c_can_priv *priv,
250 int enable)
251 {
252 unsigned int cntrl_save = priv->read_reg(priv,
253 C_CAN_CTRL_REG);
254
255 if (enable)
256 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
257 else
258 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
259
260 priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
261 }
262
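/*
 * Poll the BUSY flag of the IFx command request register. Returns 1 if
 * the previous transfer did not finish within roughly MIN_TIMEOUT_VALUE
 * microseconds, 0 otherwise.
 */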
263 static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
264 {
265 int count = MIN_TIMEOUT_VALUE;
266
267 while (count && priv->read_reg(priv,
268 C_CAN_IFACE(COMREQ_REG, iface)) &
269 IF_COMR_BUSY) {
270 count--;
271 udelay(1);
272 }
273
274 if (!count)
275 return 1;
276
277 return 0;
278 }
279
280 static inline void c_can_object_get(struct net_device *dev,
281 int iface, int objno, int mask)
282 {
283 struct c_can_priv *priv = netdev_priv(dev);
284
285 /*
286 * As per the spec, after writing the message object number into
287 * the IF command request register, the transfer between the
288 * interface registers and the message RAM must complete within
289 * 6 CAN-CLK periods.
290 */
291 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
292 IFX_WRITE_LOW_16BIT(mask));
293 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
294 IFX_WRITE_LOW_16BIT(objno));
295
296 if (c_can_msg_obj_is_busy(priv, iface))
297 netdev_err(dev, "timed out in object get\n");
298 }
299
300 static inline void c_can_object_put(struct net_device *dev,
301 int iface, int objno, int mask)
302 {
303 struct c_can_priv *priv = netdev_priv(dev);
304
305 /*
306 * As per the spec, after writing the message object number into
307 * the IF command request register, the transfer between the
308 * interface registers and the message RAM must complete within
309 * 6 CAN-CLK periods.
310 */
311 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
312 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
313 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
314 IFX_WRITE_LOW_16BIT(objno));
315
316 if (c_can_msg_obj_is_busy(priv, iface))
317 netdev_err(dev, "timed out in object put\n");
318 }
319
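/*
 * Load arbitration, data and message control for a CAN frame into the
 * IFx registers and transfer them to message object 'objno'. TXRQST is
 * set, so the frame is queued for transmission right away.
 */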
320 static void c_can_write_msg_object(struct net_device *dev,
321 int iface, struct can_frame *frame, int objno)
322 {
323 int i;
324 u16 flags = 0;
325 unsigned int id;
326 struct c_can_priv *priv = netdev_priv(dev);
327
328 if (!(frame->can_id & CAN_RTR_FLAG))
329 flags |= IF_ARB_TRANSMIT;
330
331 if (frame->can_id & CAN_EFF_FLAG) {
332 id = frame->can_id & CAN_EFF_MASK;
333 flags |= IF_ARB_MSGXTD;
334 } else
335 id = ((frame->can_id & CAN_SFF_MASK) << 18);
336
337 flags |= IF_ARB_MSGVAL;
338
339 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
340 IFX_WRITE_LOW_16BIT(id));
341 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
342 IFX_WRITE_HIGH_16BIT(id));
343
344 for (i = 0; i < frame->can_dlc; i += 2) {
345 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
346 frame->data[i] | (frame->data[i + 1] << 8));
347 }
348
349 /* enable interrupt for this message object */
350 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
351 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
352 frame->can_dlc);
353 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
354 }
355
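/*
 * Re-enable reception on the whole lower group by clearing the NEWDAT
 * bit of message objects C_CAN_MSG_OBJ_RX_FIRST..C_CAN_MSG_RX_LOW_LAST.
 */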
356 static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
357 int iface)
358 {
359 int i;
360
361 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
362 c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
363 }
364
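/*
 * A set MSGLST bit means the message object was overwritten before we
 * could read it. Clear MSGLST, INTPND and NEWDAT, account the overrun
 * and push an RX overflow error frame up the stack.
 */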
365 static int c_can_handle_lost_msg_obj(struct net_device *dev,
366 int iface, int objno, u32 ctrl)
367 {
368 struct net_device_stats *stats = &dev->stats;
369 struct c_can_priv *priv = netdev_priv(dev);
370 struct can_frame *frame;
371 struct sk_buff *skb;
372
373 ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
374 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
375 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
376
377 stats->rx_errors++;
378 stats->rx_over_errors++;
379
380 /* create an error msg */
381 skb = alloc_can_err_skb(dev, &frame);
382 if (unlikely(!skb))
383 return 0;
384
385 frame->can_id |= CAN_ERR_CRTL;
386 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
387
388 netif_receive_skb(skb);
389 return 1;
390 }
391
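/*
 * Copy a received frame out of the IFx registers into a freshly
 * allocated skb and hand it to the stack. The caller has already
 * transferred the message object into the interface registers.
 */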
392 static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
393 {
394 u16 flags, data;
395 int i;
396 unsigned int val;
397 struct c_can_priv *priv = netdev_priv(dev);
398 struct net_device_stats *stats = &dev->stats;
399 struct sk_buff *skb;
400 struct can_frame *frame;
401
402 skb = alloc_can_skb(dev, &frame);
403 if (!skb) {
404 stats->rx_dropped++;
405 return -ENOMEM;
406 }
407
408 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
409
410 flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
411 val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
412 (flags << 16);
413
414 if (flags & IF_ARB_MSGXTD)
415 frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
416 else
417 frame->can_id = (val >> 18) & CAN_SFF_MASK;
418
419 if (flags & IF_ARB_TRANSMIT)
420 frame->can_id |= CAN_RTR_FLAG;
421 else {
422 for (i = 0; i < frame->can_dlc; i += 2) {
423 data = priv->read_reg(priv,
424 C_CAN_IFACE(DATA1_REG, iface) + i / 2);
425 frame->data[i] = data;
426 frame->data[i + 1] = data >> 8;
427 }
428 }
429
430 stats->rx_packets++;
431 stats->rx_bytes += frame->can_dlc;
432
433 netif_receive_skb(skb);
434 return 0;
435 }
436
437 static void c_can_setup_receive_object(struct net_device *dev, int iface,
438 int objno, unsigned int mask,
439 unsigned int id, unsigned int mcont)
440 {
441 struct c_can_priv *priv = netdev_priv(dev);
442
443 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
444 IFX_WRITE_LOW_16BIT(mask));
445
446 /* According to the C_CAN documentation, the reserved bit
447 * in the IFx_MASK2 register is fixed to 1
448 */
449 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
450 IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
451
452 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
453 IFX_WRITE_LOW_16BIT(id));
454 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
455 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
456
457 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
458 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
459
460 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
461 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
462 }
463
464 static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
465 {
466 struct c_can_priv *priv = netdev_priv(dev);
467
468 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
469 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
470 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
471
472 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
473
474 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
475 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
476 }
477
478 static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
479 {
480 int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
481
482 /*
483 * Bit n-1 of the transmission request register corresponds to
484 * message object n, so test the bit for the given object number.
485 */
486 if (val & (1 << (objno - 1)))
487 return 1;
488
489 return 0;
490 }
491
492 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
493 struct net_device *dev)
494 {
495 u32 msg_obj_no;
496 struct c_can_priv *priv = netdev_priv(dev);
497 struct can_frame *frame = (struct can_frame *)skb->data;
498
499 if (can_dropped_invalid_skb(dev, skb))
500 return NETDEV_TX_OK;
501
502 spin_lock_bh(&priv->xmit_lock);
503 msg_obj_no = get_tx_next_msg_obj(priv);
504
505 /* prepare message object for transmission */
506 c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
507 priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
508 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
509
510 /*
511 * we have to stop the queue in case of a wrap around or
512 * if the next TX message object is still in use
513 */
514 priv->tx_next++;
515 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
516 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
517 netif_stop_queue(dev);
518 spin_unlock_bh(&priv->xmit_lock);
519
520 return NETDEV_TX_OK;
521 }
522
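/*
 * Busy-wait (up to roughly 10 ms) until the INIT bit of the control
 * register reaches the requested value; returns -EIO on timeout.
 */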
523 static int c_can_wait_for_ctrl_init(struct net_device *dev,
524 struct c_can_priv *priv, u32 init)
525 {
526 int retry = 0;
527
528 while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
529 udelay(10);
530 if (retry++ > 1000) {
531 netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
532 return -EIO;
533 }
534 }
535 return 0;
536 }
537
538 static int c_can_set_bittiming(struct net_device *dev)
539 {
540 unsigned int reg_btr, reg_brpe, ctrl_save;
541 u8 brp, brpe, sjw, tseg1, tseg2;
542 u32 ten_bit_brp;
543 struct c_can_priv *priv = netdev_priv(dev);
544 const struct can_bittiming *bt = &priv->can.bittiming;
545 int res;
546
547 /* c_can provides a 6-bit brp field and a 4-bit brpe field */
548 ten_bit_brp = bt->brp - 1;
549 brp = ten_bit_brp & BTR_BRP_MASK;
550 brpe = ten_bit_brp >> 6;
551
552 sjw = bt->sjw - 1;
553 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
554 tseg2 = bt->phase_seg2 - 1;
555 reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
556 (tseg2 << BTR_TSEG2_SHIFT);
557 reg_brpe = brpe & BRP_EXT_BRPE_MASK;
558
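/*
 * Worked example (hypothetical 24 MHz CAN clock, 500 kbit/s, 16 time
 * quanta per bit): brp = 3, so ten_bit_brp = 2, giving BRP = 2 and
 * BRPE = 0. With sjw = 1, prop_seg + phase_seg1 = 12 and phase_seg2 = 3
 * this yields reg_btr = 0x2 | (0 << 6) | (0xb << 8) | (0x2 << 12) = 0x2b02
 * and reg_brpe = 0x0000.
 */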
559 netdev_info(dev,
560 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
561
562 ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
563 ctrl_save &= ~CONTROL_INIT;
564 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
565 res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
566 if (res)
567 return res;
568
569 priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
570 priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
571 priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
572
573 return c_can_wait_for_ctrl_init(dev, priv, 0);
574 }
575
576 /*
577 * Configure C_CAN message objects for Tx and Rx purposes:
578 * C_CAN provides a total of 32 message objects that can be configured
579 * either for Tx or Rx purposes. Here the first 16 message objects are used as
580 * a reception FIFO. The end of the reception FIFO is signified by the
581 * EoB bit being SET. The remaining 16 message objects are reserved for Tx.
582 * See the user guide for further details on configuring message
583 * objects.
584 */
585 static void c_can_configure_msg_objects(struct net_device *dev)
586 {
587 int i;
588
589 /* first invalidate all message objects */
590 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
591 c_can_inval_msg_object(dev, IF_RX, i);
592
593 /* setup receive message objects */
594 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
595 c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
596 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
597
598 c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
599 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
600 }
601
602 /*
603 * Configure C_CAN chip:
604 * - enable/disable auto-retransmission
605 * - set operating mode
606 * - configure message objects
607 */
608 static int c_can_chip_config(struct net_device *dev)
609 {
610 struct c_can_priv *priv = netdev_priv(dev);
611
612 /* enable automatic retransmission */
613 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
614
615 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
616 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
617 /* loopback + silent mode : useful for hot self-test */
618 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
619 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
620 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
621 /* loopback mode : useful for self-test function */
622 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
623 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
624 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
625 /* silent mode : bus-monitoring mode */
626 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
627 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
628 }
629
630 /* configure message objects */
631 c_can_configure_msg_objects(dev);
632
633 /* set a `lec` value so that we can check for updates later */
634 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
635
636 /* set bittiming params */
637 return c_can_set_bittiming(dev);
638 }
639
640 static int c_can_start(struct net_device *dev)
641 {
642 struct c_can_priv *priv = netdev_priv(dev);
643 int err;
644
645 /* basic c_can configuration */
646 err = c_can_chip_config(dev);
647 if (err)
648 return err;
649
650 priv->can.state = CAN_STATE_ERROR_ACTIVE;
651
652 /* reset tx helper pointers */
653 priv->tx_next = priv->tx_echo = 0;
654
655 return 0;
656 }
657
658 static void c_can_stop(struct net_device *dev)
659 {
660 struct c_can_priv *priv = netdev_priv(dev);
661
662 /* disable all interrupts */
663 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
664
665 /* set the state as STOPPED */
666 priv->can.state = CAN_STATE_STOPPED;
667 }
668
669 static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
670 {
671 struct c_can_priv *priv = netdev_priv(dev);
672 int err;
673
674 switch (mode) {
675 case CAN_MODE_START:
676 err = c_can_start(dev);
677 if (err)
678 return err;
679 netif_wake_queue(dev);
680 /* enable status change, error and module interrupts */
681 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
682 break;
683 default:
684 return -EOPNOTSUPP;
685 }
686
687 return 0;
688 }
689
690 static int __c_can_get_berr_counter(const struct net_device *dev,
691 struct can_berr_counter *bec)
692 {
693 unsigned int reg_err_counter;
694 struct c_can_priv *priv = netdev_priv(dev);
695
696 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
697 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
698 ERR_CNT_REC_SHIFT;
699 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
700
701 return 0;
702 }
703
704 static int c_can_get_berr_counter(const struct net_device *dev,
705 struct can_berr_counter *bec)
706 {
707 struct c_can_priv *priv = netdev_priv(dev);
708 int err;
709
710 c_can_pm_runtime_get_sync(priv);
711 err = __c_can_get_berr_counter(dev, bec);
712 c_can_pm_runtime_put_sync(priv);
713
714 return err;
715 }
716
717 /*
718 * priv->tx_echo holds the number of the oldest can_frame put for
719 * transmission into the hardware, but not yet ACKed by the CAN tx
720 * complete IRQ.
721 *
722 * We iterate from priv->tx_echo to priv->tx_next; each packet that has
723 * been transmitted is echoed back to the CAN framework.
724 * If we discover a not yet transmitted packet, stop looking for more.
725 */
726 static void c_can_do_tx(struct net_device *dev)
727 {
728 struct c_can_priv *priv = netdev_priv(dev);
729 struct net_device_stats *stats = &dev->stats;
730 u32 val, obj, pkts = 0, bytes = 0;
731
732 spin_lock_bh(&priv->xmit_lock);
733
734 for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
735 obj = get_tx_echo_msg_obj(priv->tx_echo);
736 val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
737
738 if (val & (1 << (obj - 1)))
739 break;
740
741 can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
742 bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
743 pkts++;
744 c_can_inval_msg_object(dev, IF_TX, obj);
745 }
746
747 /* restart queue on wrap-around or if queue stalled on last pkt */
748 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
749 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
750 netif_wake_queue(dev);
751
752 spin_unlock_bh(&priv->xmit_lock);
753
754 if (pkts) {
755 stats->tx_bytes += bytes;
756 stats->tx_packets += pkts;
757 can_led_event(dev, CAN_LED_EVENT_TX);
758 }
759 }
760
761 /*
762 * If we have a gap in the pending bits, that means we either
763 * raced with the hardware or failed to read out all upper
764 * objects in the last run due to the quota limit.
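 *
 * Example: pend = 0x19 (objects 1, 4 and 5 pending) has a gap at
 * objects 2 and 3; the function then returns 0x18 so that objects 4
 * and 5 above the gap are handled first.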
765 */
766 static u32 c_can_adjust_pending(u32 pend)
767 {
768 u32 weight, lasts;
769
770 if (pend == RECEIVE_OBJECT_BITS)
771 return pend;
772
773 /*
774 * If the last set bit is larger than the number of pending
775 * bits we have a gap.
776 */
777 weight = hweight32(pend);
778 lasts = fls(pend);
779
780 /* If the bits are linear, nothing to do */
781 if (lasts == weight)
782 return pend;
783
784 /*
785 * Find the first set bit after the gap. We walk backwards
786 * from the last set bit.
787 */
788 for (lasts--; pend & (1 << (lasts - 1)); lasts--);
789
790 return pend & ~((1 << lasts) - 1);
791 }
792
793 static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
794 u32 pend, int quota)
795 {
796 u32 pkts = 0, ctrl, obj, mcmd;
797
798 while ((obj = ffs(pend)) && quota > 0) {
799 pend &= ~BIT(obj - 1);
800
801 mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
802 IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
803
804 c_can_object_get(dev, IF_RX, obj, mcmd);
805 ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
806
807 if (ctrl & IF_MCONT_MSGLST) {
808 int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);
809
810 pkts += n;
811 quota -= n;
812 continue;
813 }
814
815 /*
816 * This really should not happen, but this covers some
817 * odd HW behaviour. Do not remove this check unless you
818 * want to brick your machine.
819 */
820 if (!(ctrl & IF_MCONT_NEWDAT))
821 continue;
822
823 /* read the data from the message object */
824 c_can_read_msg_object(dev, IF_RX, ctrl);
825
826 if (obj == C_CAN_MSG_RX_LOW_LAST)
827 /* activate all lower message objects */
828 c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
829
830 pkts++;
831 quota--;
832 }
833
834 return pkts;
835 }
836
837 /*
838 * theory of operation:
839 *
840 * The c_can core saves a received CAN message into the first free message
841 * object it finds (starting with the lowest). Bits NEWDAT and
842 * INTPND are set for this message object, indicating that a new message
843 * has arrived. To keep frame reception in order despite this, we keep two
844 * groups of message objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
845 *
846 * To ensure in-order frame reception we use the following
847 * approach while re-activating a message object to receive further
848 * frames:
849 * - if the current message object number is lower than
850 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
851 * the INTPND bit.
852 * - if the current message object number is equal to
853 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
854 * receive message objects.
855 * - if the current message object number is greater than
856 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
857 * only this message object.
858 */
859 static int c_can_do_rx_poll(struct net_device *dev, int quota)
860 {
861 struct c_can_priv *priv = netdev_priv(dev);
862 u32 pkts = 0, pend = 0, toread, n;
863
864 /*
865 * It is faster to read only one 16-bit register. This is only possible
866 * for a maximum of 16 objects.
867 */
868 BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
869 "Implementation does not support more message objects than 16");
870
871 while (quota > 0) {
872 if (!pend) {
873 pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
874 if (!pend)
875 break;
876 /*
877 * If the pending field has a gap, handle the
878 * bits above the gap first.
879 */
880 toread = c_can_adjust_pending(pend);
881 } else {
882 toread = pend;
883 }
884 /* Remove the bits from pend */
885 pend &= ~toread;
886 /* Read the objects */
887 n = c_can_read_objects(dev, priv, toread, quota);
888 pkts += n;
889 quota -= n;
890 }
891
892 if (pkts)
893 can_led_event(dev, CAN_LED_EVENT_RX);
894
895 return pkts;
896 }
897
898 static int c_can_handle_state_change(struct net_device *dev,
899 enum c_can_bus_error_types error_type)
900 {
901 unsigned int reg_err_counter;
902 unsigned int rx_err_passive;
903 struct c_can_priv *priv = netdev_priv(dev);
904 struct net_device_stats *stats = &dev->stats;
905 struct can_frame *cf;
906 struct sk_buff *skb;
907 struct can_berr_counter bec;
908
909 switch (error_type) {
910 case C_CAN_ERROR_WARNING:
911 /* error warning state */
912 priv->can.can_stats.error_warning++;
913 priv->can.state = CAN_STATE_ERROR_WARNING;
914 break;
915 case C_CAN_ERROR_PASSIVE:
916 /* error passive state */
917 priv->can.can_stats.error_passive++;
918 priv->can.state = CAN_STATE_ERROR_PASSIVE;
919 break;
920 case C_CAN_BUS_OFF:
921 /* bus-off state */
922 priv->can.state = CAN_STATE_BUS_OFF;
923 can_bus_off(dev);
924 break;
925 default:
926 break;
927 }
928
929 /* propagate the error condition to the CAN stack */
930 skb = alloc_can_err_skb(dev, &cf);
931 if (unlikely(!skb))
932 return 0;
933
934 __c_can_get_berr_counter(dev, &bec);
935 reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
936 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
937 ERR_CNT_RP_SHIFT;
938
939 switch (error_type) {
940 case C_CAN_ERROR_WARNING:
941 /* error warning state */
942 cf->can_id |= CAN_ERR_CRTL;
943 cf->data[1] = (bec.txerr > bec.rxerr) ?
944 CAN_ERR_CRTL_TX_WARNING :
945 CAN_ERR_CRTL_RX_WARNING;
946 cf->data[6] = bec.txerr;
947 cf->data[7] = bec.rxerr;
948
949 break;
950 case C_CAN_ERROR_PASSIVE:
951 /* error passive state */
952 cf->can_id |= CAN_ERR_CRTL;
953 if (rx_err_passive)
954 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
955 if (bec.txerr > 127)
956 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
957
958 cf->data[6] = bec.txerr;
959 cf->data[7] = bec.rxerr;
960 break;
961 case C_CAN_BUS_OFF:
962 /* bus-off state */
963 cf->can_id |= CAN_ERR_BUSOFF;
964 can_bus_off(dev);
965 break;
966 default:
967 break;
968 }
969
970 stats->rx_packets++;
971 stats->rx_bytes += cf->can_dlc;
972 netif_receive_skb(skb);
973
974 return 1;
975 }
976
977 static int c_can_handle_bus_err(struct net_device *dev,
978 enum c_can_lec_type lec_type)
979 {
980 struct c_can_priv *priv = netdev_priv(dev);
981 struct net_device_stats *stats = &dev->stats;
982 struct can_frame *cf;
983 struct sk_buff *skb;
984
985 /*
986 * Early exit if there is no LEC update or no error.
987 * No LEC update means that no CAN bus event has been detected
988 * since the CPU wrote the 0x7 value to the status reg.
989 */
990 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
991 return 0;
992
993 if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
994 return 0;
995
996 /* common for all type of bus errors */
997 priv->can.can_stats.bus_error++;
998 stats->rx_errors++;
999
1000 /* propagate the error condition to the CAN stack */
1001 skb = alloc_can_err_skb(dev, &cf);
1002 if (unlikely(!skb))
1003 return 0;
1004
1005 /*
1006 * check the 'last error code' (LEC), which tells us the
1007 * type of the last error that occurred on the CAN bus
1008 */
1009 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1010 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1011
1012 switch (lec_type) {
1013 case LEC_STUFF_ERROR:
1014 netdev_dbg(dev, "stuff error\n");
1015 cf->data[2] |= CAN_ERR_PROT_STUFF;
1016 break;
1017 case LEC_FORM_ERROR:
1018 netdev_dbg(dev, "form error\n");
1019 cf->data[2] |= CAN_ERR_PROT_FORM;
1020 break;
1021 case LEC_ACK_ERROR:
1022 netdev_dbg(dev, "ack error\n");
1023 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
1024 CAN_ERR_PROT_LOC_ACK_DEL);
1025 break;
1026 case LEC_BIT1_ERROR:
1027 netdev_dbg(dev, "bit1 error\n");
1028 cf->data[2] |= CAN_ERR_PROT_BIT1;
1029 break;
1030 case LEC_BIT0_ERROR:
1031 netdev_dbg(dev, "bit0 error\n");
1032 cf->data[2] |= CAN_ERR_PROT_BIT0;
1033 break;
1034 case LEC_CRC_ERROR:
1035 netdev_dbg(dev, "CRC error\n");
1036 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
1037 CAN_ERR_PROT_LOC_CRC_DEL);
1038 break;
1039 default:
1040 break;
1041 }
1042
1043 /* set a `lec` value so that we can check for updates later */
1044 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1045
1046 stats->rx_packets++;
1047 stats->rx_bytes += cf->can_dlc;
1048 netif_receive_skb(skb);
1049 return 1;
1050 }
1051
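/*
 * NAPI poll handler: status interrupts (state changes, LEC bus errors)
 * are handled with the highest priority; message object interrupts
 * trigger the RX poll or the TX completion path. Interrupts are
 * re-enabled when less than the full quota was used, unless the
 * controller is bus-off.
 */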
1052 static int c_can_poll(struct napi_struct *napi, int quota)
1053 {
1054 u16 irqstatus;
1055 int work_done = 0;
1056 struct net_device *dev = napi->dev;
1057 struct c_can_priv *priv = netdev_priv(dev);
1058
1059 irqstatus = priv->irqstatus;
1060 if (!irqstatus)
1061 goto end;
1062
1063 /* status events have the highest priority */
1064 if (irqstatus == STATUS_INTERRUPT) {
1065 priv->current_status = priv->read_reg(priv,
1066 C_CAN_STS_REG);
1067
1068 /* handle Tx/Rx events */
1069 if (priv->current_status & STATUS_TXOK)
1070 priv->write_reg(priv, C_CAN_STS_REG,
1071 priv->current_status & ~STATUS_TXOK);
1072
1073 if (priv->current_status & STATUS_RXOK)
1074 priv->write_reg(priv, C_CAN_STS_REG,
1075 priv->current_status & ~STATUS_RXOK);
1076
1077 /* handle state changes */
1078 if ((priv->current_status & STATUS_EWARN) &&
1079 (!(priv->last_status & STATUS_EWARN))) {
1080 netdev_dbg(dev, "entered error warning state\n");
1081 work_done += c_can_handle_state_change(dev,
1082 C_CAN_ERROR_WARNING);
1083 }
1084 if ((priv->current_status & STATUS_EPASS) &&
1085 (!(priv->last_status & STATUS_EPASS))) {
1086 netdev_dbg(dev, "entered error passive state\n");
1087 work_done += c_can_handle_state_change(dev,
1088 C_CAN_ERROR_PASSIVE);
1089 }
1090 if ((priv->current_status & STATUS_BOFF) &&
1091 (!(priv->last_status & STATUS_BOFF))) {
1092 netdev_dbg(dev, "entered bus off state\n");
1093 work_done += c_can_handle_state_change(dev,
1094 C_CAN_BUS_OFF);
1095 goto end;
1096 }
1097
1098 /* handle bus recovery events */
1099 if ((!(priv->current_status & STATUS_BOFF)) &&
1100 (priv->last_status & STATUS_BOFF)) {
1101 netdev_dbg(dev, "left bus off state\n");
1102 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1103 }
1104 if ((!(priv->current_status & STATUS_EPASS)) &&
1105 (priv->last_status & STATUS_EPASS)) {
1106 netdev_dbg(dev, "left error passive state\n");
1107 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1108 }
1109
1110 priv->last_status = priv->current_status;
1111
1112 /* handle lec errors on the bus */
1113 work_done += c_can_handle_bus_err(dev,
1114 priv->current_status & LEC_MASK);
1115 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1116 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1117 /* handle events corresponding to receive message objects */
1118 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1119 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1120 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1121 /* handle events corresponding to transmit message objects */
1122 c_can_do_tx(dev);
1123 }
1124
1125 end:
1126 if (work_done < quota) {
1127 napi_complete(napi);
1128 /* enable all IRQs if we are not in bus off state */
1129 if (priv->can.state != CAN_STATE_BUS_OFF)
1130 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
1131 }
1132
1133 return work_done;
1134 }
1135
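/*
 * Hard interrupt handler: latch the interrupt register, mask all
 * interrupts and defer the actual work to NAPI.
 */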
1136 static irqreturn_t c_can_isr(int irq, void *dev_id)
1137 {
1138 struct net_device *dev = (struct net_device *)dev_id;
1139 struct c_can_priv *priv = netdev_priv(dev);
1140
1141 priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
1142 if (!priv->irqstatus)
1143 return IRQ_NONE;
1144
1145 /* disable all interrupts and schedule the NAPI */
1146 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1147 napi_schedule(&priv->napi);
1148
1149 return IRQ_HANDLED;
1150 }
1151
1152 static int c_can_open(struct net_device *dev)
1153 {
1154 int err;
1155 struct c_can_priv *priv = netdev_priv(dev);
1156
1157 c_can_pm_runtime_get_sync(priv);
1158 c_can_reset_ram(priv, true);
1159
1160 /* open the can device */
1161 err = open_candev(dev);
1162 if (err) {
1163 netdev_err(dev, "failed to open can device\n");
1164 goto exit_open_fail;
1165 }
1166
1167 /* register interrupt handler */
1168 err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
1169 dev);
1170 if (err < 0) {
1171 netdev_err(dev, "failed to request interrupt\n");
1172 goto exit_irq_fail;
1173 }
1174
1175 /* start the c_can controller */
1176 err = c_can_start(dev);
1177 if (err)
1178 goto exit_start_fail;
1179
1180 can_led_event(dev, CAN_LED_EVENT_OPEN);
1181
1182 napi_enable(&priv->napi);
1183 /* enable status change, error and module interrupts */
1184 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
1185 netif_start_queue(dev);
1186
1187 return 0;
1188
1189 exit_start_fail:
1190 free_irq(dev->irq, dev);
1191 exit_irq_fail:
1192 close_candev(dev);
1193 exit_open_fail:
1194 c_can_reset_ram(priv, false);
1195 c_can_pm_runtime_put_sync(priv);
1196 return err;
1197 }
1198
1199 static int c_can_close(struct net_device *dev)
1200 {
1201 struct c_can_priv *priv = netdev_priv(dev);
1202
1203 netif_stop_queue(dev);
1204 napi_disable(&priv->napi);
1205 c_can_stop(dev);
1206 free_irq(dev->irq, dev);
1207 close_candev(dev);
1208
1209 c_can_reset_ram(priv, false);
1210 c_can_pm_runtime_put_sync(priv);
1211
1212 can_led_event(dev, CAN_LED_EVENT_STOP);
1213
1214 return 0;
1215 }
1216
1217 struct net_device *alloc_c_can_dev(void)
1218 {
1219 struct net_device *dev;
1220 struct c_can_priv *priv;
1221
1222 dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
1223 if (!dev)
1224 return NULL;
1225
1226 priv = netdev_priv(dev);
1227 spin_lock_init(&priv->xmit_lock);
1228 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1229
1230 priv->dev = dev;
1231 priv->can.bittiming_const = &c_can_bittiming_const;
1232 priv->can.do_set_mode = c_can_set_mode;
1233 priv->can.do_get_berr_counter = c_can_get_berr_counter;
1234 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1235 CAN_CTRLMODE_LISTENONLY |
1236 CAN_CTRLMODE_BERR_REPORTING;
1237
1238 return dev;
1239 }
1240 EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1241
1242 #ifdef CONFIG_PM
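/*
 * Request the D_CAN power-down state: set CONTROL_EX_PDR and wait up to
 * INIT_WAIT_MS for the PDA status bit before stopping the controller.
 */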
1243 int c_can_power_down(struct net_device *dev)
1244 {
1245 u32 val;
1246 unsigned long time_out;
1247 struct c_can_priv *priv = netdev_priv(dev);
1248
1249 if (!(dev->flags & IFF_UP))
1250 return 0;
1251
1252 WARN_ON(priv->type != BOSCH_D_CAN);
1253
1254 /* set PDR value so the device goes to power down mode */
1255 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1256 val |= CONTROL_EX_PDR;
1257 priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1258
1259 /* Wait for the PDA bit to get set */
1260 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1261 while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1262 time_after(time_out, jiffies))
1263 cpu_relax();
1264
1265 if (time_after(jiffies, time_out))
1266 return -ETIMEDOUT;
1267
1268 c_can_stop(dev);
1269
1270 c_can_reset_ram(priv, false);
1271 c_can_pm_runtime_put_sync(priv);
1272
1273 return 0;
1274 }
1275 EXPORT_SYMBOL_GPL(c_can_power_down);
1276
1277 int c_can_power_up(struct net_device *dev)
1278 {
1279 u32 val;
1280 unsigned long time_out;
1281 struct c_can_priv *priv = netdev_priv(dev);
1282 int ret;
1283
1284 if (!(dev->flags & IFF_UP))
1285 return 0;
1286
1287 WARN_ON(priv->type != BOSCH_D_CAN);
1288
1289 c_can_pm_runtime_get_sync(priv);
1290 c_can_reset_ram(priv, true);
1291
1292 /* Clear PDR and INIT bits */
1293 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1294 val &= ~CONTROL_EX_PDR;
1295 priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1296 val = priv->read_reg(priv, C_CAN_CTRL_REG);
1297 val &= ~CONTROL_INIT;
1298 priv->write_reg(priv, C_CAN_CTRL_REG, val);
1299
1300 /* Wait for the PDA bit to get clear */
1301 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1302 while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1303 time_after(time_out, jiffies))
1304 cpu_relax();
1305
1306 if (time_after(jiffies, time_out))
1307 return -ETIMEDOUT;
1308
1309 ret = c_can_start(dev);
1310 if (!ret)
1311 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
1312
1313 return ret;
1314 }
1315 EXPORT_SYMBOL_GPL(c_can_power_up);
1316 #endif
1317
1318 void free_c_can_dev(struct net_device *dev)
1319 {
1320 struct c_can_priv *priv = netdev_priv(dev);
1321
1322 netif_napi_del(&priv->napi);
1323 free_candev(dev);
1324 }
1325 EXPORT_SYMBOL_GPL(free_c_can_dev);
1326
1327 static const struct net_device_ops c_can_netdev_ops = {
1328 .ndo_open = c_can_open,
1329 .ndo_stop = c_can_close,
1330 .ndo_start_xmit = c_can_start_xmit,
1331 .ndo_change_mtu = can_change_mtu,
1332 };
1333
1334 int register_c_can_dev(struct net_device *dev)
1335 {
1336 struct c_can_priv *priv = netdev_priv(dev);
1337 int err;
1338
1339 c_can_pm_runtime_enable(priv);
1340
1341 dev->flags |= IFF_ECHO; /* we support local echo */
1342 dev->netdev_ops = &c_can_netdev_ops;
1343
1344 err = register_candev(dev);
1345 if (err)
1346 c_can_pm_runtime_disable(priv);
1347 else
1348 devm_can_led_init(dev);
1349
1350 return err;
1351 }
1352 EXPORT_SYMBOL_GPL(register_c_can_dev);
1353
1354 void unregister_c_can_dev(struct net_device *dev)
1355 {
1356 struct c_can_priv *priv = netdev_priv(dev);
1357
1358 unregister_candev(dev);
1359
1360 c_can_pm_runtime_disable(priv);
1361 }
1362 EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1363
1364 MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
1365 MODULE_LICENSE("GPL v2");
1366 MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");