/*
 * FUJITSU Extended Socket Network Device driver
 * Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/nls.h>
25 #include <linux/platform_device.h>
26 #include <linux/netdevice.h>
27 #include <linux/interrupt.h>
30 #include "fjes_trace.h"
/* Driver identity strings and module metadata.
 * NOTE(review): MAJ/MIN are defined outside this excerpt; DRV_VERSION
 * stringifies them as "MAJ.MIN" — confirm against the full file.
 */
34 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
35 #define DRV_NAME "fjes"
/* Non-static: visible to other translation units of the driver. */
36 char fjes_driver_name
[] = DRV_NAME
;
37 char fjes_driver_version
[] = DRV_VERSION
;
38 static const char fjes_driver_string
[] =
39 "FUJITSU Extended Socket Network Device Driver";
40 static const char fjes_copyright
[] =
41 "Copyright (c) 2015 FUJITSU LIMITED";
43 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
44 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
45 MODULE_LICENSE("GPL");
46 MODULE_VERSION(DRV_VERSION
);
/* Forward declarations for the driver's static functions: IRQ setup,
 * net_device_ops callbacks, ACPI/platform bind hooks, deferred-work
 * handlers, and the NAPI poll routine.
 */
48 static int fjes_request_irq(struct fjes_adapter
*);
49 static void fjes_free_irq(struct fjes_adapter
*);
51 static int fjes_open(struct net_device
*);
52 static int fjes_close(struct net_device
*);
53 static int fjes_setup_resources(struct fjes_adapter
*);
54 static void fjes_free_resources(struct fjes_adapter
*);
55 static netdev_tx_t
fjes_xmit_frame(struct sk_buff
*, struct net_device
*);
56 static void fjes_raise_intr_rxdata_task(struct work_struct
*);
57 static void fjes_tx_stall_task(struct work_struct
*);
58 static void fjes_force_close_task(struct work_struct
*);
59 static irqreturn_t
fjes_intr(int, void*);
60 static void fjes_get_stats64(struct net_device
*, struct rtnl_link_stats64
*);
61 static int fjes_change_mtu(struct net_device
*, int);
62 static int fjes_vlan_rx_add_vid(struct net_device
*, __be16 proto
, u16
);
63 static int fjes_vlan_rx_kill_vid(struct net_device
*, __be16 proto
, u16
);
64 static void fjes_tx_retry(struct net_device
*);
66 static int fjes_acpi_add(struct acpi_device
*);
67 static int fjes_acpi_remove(struct acpi_device
*);
68 static acpi_status
fjes_get_acpi_resource(struct acpi_resource
*, void*);
70 static int fjes_probe(struct platform_device
*);
71 static int fjes_remove(struct platform_device
*);
73 static int fjes_sw_init(struct fjes_adapter
*);
74 static void fjes_netdev_setup(struct net_device
*);
75 static void fjes_irq_watch_task(struct work_struct
*);
76 static void fjes_watch_unshare_task(struct work_struct
*);
77 static void fjes_rx_irq(struct fjes_adapter
*, int);
78 static int fjes_poll(struct napi_struct
*, int);
/* ACPI match table, driver structures, and the MEM/IRQ resource pair that
 * fjes_get_acpi_resource() fills in from the ACPI _CRS method.
 * NOTE(review): several initializer fields (ids entries, .name, .probe,
 * .add callbacks, resource names) are missing from this excerpt.
 */
80 static const struct acpi_device_id fjes_acpi_ids
[] = {
84 MODULE_DEVICE_TABLE(acpi
, fjes_acpi_ids
);
86 static struct acpi_driver fjes_acpi_driver
= {
93 .remove
= fjes_acpi_remove
,
97 static struct platform_driver fjes_driver
= {
102 .remove
= fjes_remove
,
/* fjes_resource[0] = MMIO window, fjes_resource[1] = interrupt line. */
105 static struct resource fjes_resource
[] = {
107 .flags
= IORESOURCE_MEM
,
112 .flags
= IORESOURCE_IRQ
,
/* fjes_acpi_add - ACPI bind hook.
 * Evaluates the device's _STR object, converts it from UTF-16 and checks
 * it against FJES_ACPI_SYMBOL; on a match, walks _CRS to populate
 * fjes_resource[] and registers a platform device, stashing it in
 * device->driver_data for fjes_acpi_remove().
 * NOTE(review): local declarations (status/result) and the error-return
 * lines are missing from this excerpt.
 */
118 static int fjes_acpi_add(struct acpi_device
*device
)
120 struct acpi_buffer buffer
= { ACPI_ALLOCATE_BUFFER
, NULL
};
121 char str_buf
[sizeof(FJES_ACPI_SYMBOL
) + 1];
122 struct platform_device
*plat_dev
;
123 union acpi_object
*str
;
/* Ask ACPI for the device description string (_STR). */
127 status
= acpi_evaluate_object(device
->handle
, "_STR", NULL
, &buffer
);
128 if (ACPI_FAILURE(status
))
131 str
= buffer
.pointer
;
/* _STR is UTF-16LE; convert into str_buf (NUL space reserved above). */
132 result
= utf16s_to_utf8s((wchar_t *)str
->string
.pointer
,
133 str
->string
.length
, UTF16_LITTLE_ENDIAN
,
134 str_buf
, sizeof(str_buf
) - 1);
/* Only bind to devices whose _STR matches the FJES symbol. */
137 if (strncmp(FJES_ACPI_SYMBOL
, str_buf
, strlen(FJES_ACPI_SYMBOL
)) != 0) {
138 kfree(buffer
.pointer
);
141 kfree(buffer
.pointer
);
/* Fill fjes_resource[] (MEM + IRQ) from the _CRS resource list. */
143 status
= acpi_walk_resources(device
->handle
, METHOD_NAME__CRS
,
144 fjes_get_acpi_resource
, fjes_resource
);
145 if (ACPI_FAILURE(status
))
148 /* create platform_device */
149 plat_dev
= platform_device_register_simple(DRV_NAME
, 0, fjes_resource
,
150 ARRAY_SIZE(fjes_resource
));
151 device
->driver_data
= plat_dev
;
/* fjes_acpi_remove - ACPI unbind hook: unregister the platform device
 * that fjes_acpi_add() created and stored via device->driver_data.
 */
156 static int fjes_acpi_remove(struct acpi_device
*device
)
158 struct platform_device
*plat_dev
;
160 plat_dev
= (struct platform_device
*)acpi_driver_data(device
);
161 platform_device_unregister(plat_dev
);
/* fjes_get_acpi_resource - acpi_walk_resources() callback.
 * @data points at fjes_resource[]; an ADDRESS32 descriptor fills the MEM
 * entry (res[0]) and an IRQ descriptor with exactly one interrupt fills
 * the IRQ entry (res[1]).
 * NOTE(review): switch braces, break statements and the return value are
 * missing from this excerpt.
 */
167 fjes_get_acpi_resource(struct acpi_resource
*acpi_res
, void *data
)
169 struct acpi_resource_address32
*addr
;
170 struct acpi_resource_irq
*irq
;
171 struct resource
*res
= data
;
173 switch (acpi_res
->type
) {
174 case ACPI_RESOURCE_TYPE_ADDRESS32
:
/* MEM window: [minimum, minimum + length - 1]. */
175 addr
= &acpi_res
->data
.address32
;
176 res
[0].start
= addr
->address
.minimum
;
177 res
[0].end
= addr
->address
.minimum
+
178 addr
->address
.address_length
- 1;
181 case ACPI_RESOURCE_TYPE_IRQ
:
182 irq
= &acpi_res
->data
.irq
;
/* Exactly one interrupt expected for this device. */
183 if (irq
->interrupt_count
!= 1)
185 res
[1].start
= irq
->interrupts
[0];
186 res
[1].end
= irq
->interrupts
[0];
/* fjes_request_irq - enable the IRQ watchdog work and register the shared
 * interrupt handler (fjes_intr) once; irq_registered tracks whether a
 * free_irq() is owed.
 * NOTE(review): the result declaration, error handling after request_irq
 * and the return statement are missing from this excerpt.
 */
196 static int fjes_request_irq(struct fjes_adapter
*adapter
)
198 struct net_device
*netdev
= adapter
->netdev
;
/* Kick the periodic interrupt-watch work if it is not already queued. */
201 adapter
->interrupt_watch_enable
= true;
202 if (!delayed_work_pending(&adapter
->interrupt_watch_task
)) {
203 queue_delayed_work(adapter
->control_wq
,
204 &adapter
->interrupt_watch_task
,
205 FJES_IRQ_WATCH_DELAY
);
208 if (!adapter
->irq_registered
) {
209 result
= request_irq(adapter
->hw
.hw_res
.irq
, fjes_intr
,
210 IRQF_SHARED
, netdev
->name
, adapter
);
212 adapter
->irq_registered
= false;
214 adapter
->irq_registered
= true;
/* fjes_free_irq - counterpart of fjes_request_irq(): stop the interrupt
 * watchdog, mask all device interrupts, and release the IRQ line if it
 * was registered.
 */
220 static void fjes_free_irq(struct fjes_adapter
*adapter
)
222 struct fjes_hw
*hw
= &adapter
->hw
;
/* Disable the flag first so a concurrent watch task will not requeue. */
224 adapter
->interrupt_watch_enable
= false;
225 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
/* Mask (true) every interrupt source before dropping the handler. */
227 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, true);
229 if (adapter
->irq_registered
) {
230 free_irq(adapter
->hw
.hw_res
.irq
, adapter
);
231 adapter
->irq_registered
= false;
/* net_device_ops: wires the standard netdev callbacks to this driver's
 * open/close/xmit/stats/MTU/timeout/VLAN handlers.
 */
235 static const struct net_device_ops fjes_netdev_ops
= {
236 .ndo_open
= fjes_open
,
237 .ndo_stop
= fjes_close
,
238 .ndo_start_xmit
= fjes_xmit_frame
,
239 .ndo_get_stats64
= fjes_get_stats64
,
240 .ndo_change_mtu
= fjes_change_mtu
,
241 .ndo_tx_timeout
= fjes_tx_retry
,
242 .ndo_vlan_rx_add_vid
= fjes_vlan_rx_add_vid
,
243 .ndo_vlan_rx_kill_vid
= fjes_vlan_rx_kill_vid
,
246 /* fjes_open - Called when a network interface is made active */
247 static int fjes_open(struct net_device
*netdev
)
249 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
250 struct fjes_hw
*hw
= &adapter
->hw
;
253 if (adapter
->open_guard
)
256 result
= fjes_setup_resources(adapter
);
260 hw
->txrx_stop_req_bit
= 0;
261 hw
->epstop_req_bit
= 0;
263 napi_enable(&adapter
->napi
);
265 fjes_hw_capture_interrupt_status(hw
);
267 result
= fjes_request_irq(adapter
);
271 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, false);
273 netif_tx_start_all_queues(netdev
);
274 netif_carrier_on(netdev
);
279 fjes_free_irq(adapter
);
280 napi_disable(&adapter
->napi
);
283 fjes_free_resources(adapter
);
287 /* fjes_close - Disables a network interface */
288 static int fjes_close(struct net_device
*netdev
)
290 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
291 struct fjes_hw
*hw
= &adapter
->hw
;
295 netif_tx_stop_all_queues(netdev
);
296 netif_carrier_off(netdev
);
298 fjes_hw_raise_epstop(hw
);
300 napi_disable(&adapter
->napi
);
302 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
303 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
304 if (epidx
== hw
->my_epid
)
307 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
309 adapter
->hw
.ep_shm_info
[epidx
]
310 .tx
.info
->v1i
.rx_status
&=
313 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
315 fjes_free_irq(adapter
);
317 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
318 cancel_work_sync(&adapter
->unshare_watch_task
);
319 adapter
->unshare_watch_bitmask
= 0;
320 cancel_work_sync(&adapter
->raise_intr_rxdata_task
);
321 cancel_work_sync(&adapter
->tx_stall_task
);
323 cancel_work_sync(&hw
->update_zone_task
);
324 cancel_work_sync(&hw
->epstop_task
);
326 fjes_hw_wait_epstop(hw
);
328 fjes_free_resources(adapter
);
333 static int fjes_setup_resources(struct fjes_adapter
*adapter
)
335 struct net_device
*netdev
= adapter
->netdev
;
336 struct ep_share_mem_info
*buf_pair
;
337 struct fjes_hw
*hw
= &adapter
->hw
;
342 mutex_lock(&hw
->hw_info
.lock
);
343 result
= fjes_hw_request_info(hw
);
346 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
347 hw
->ep_shm_info
[epidx
].es_status
=
348 hw
->hw_info
.res_buf
->info
.info
[epidx
].es_status
;
349 hw
->ep_shm_info
[epidx
].zone
=
350 hw
->hw_info
.res_buf
->info
.info
[epidx
].zone
;
356 adapter
->force_reset
= true;
358 mutex_unlock(&hw
->hw_info
.lock
);
361 mutex_unlock(&hw
->hw_info
.lock
);
363 for (epidx
= 0; epidx
< (hw
->max_epid
); epidx
++) {
364 if ((epidx
!= hw
->my_epid
) &&
365 (hw
->ep_shm_info
[epidx
].es_status
==
366 FJES_ZONING_STATUS_ENABLE
)) {
367 fjes_hw_raise_interrupt(hw
, epidx
,
368 REG_ICTL_MASK_INFO_UPDATE
);
369 hw
->ep_shm_info
[epidx
].ep_stats
370 .send_intr_zoneupdate
+= 1;
374 msleep(FJES_OPEN_ZONE_UPDATE_WAIT
* hw
->max_epid
);
376 for (epidx
= 0; epidx
< (hw
->max_epid
); epidx
++) {
377 if (epidx
== hw
->my_epid
)
380 buf_pair
= &hw
->ep_shm_info
[epidx
];
382 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
383 fjes_hw_setup_epbuf(&buf_pair
->tx
, netdev
->dev_addr
,
385 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
387 if (fjes_hw_epid_is_same_zone(hw
, epidx
)) {
388 mutex_lock(&hw
->hw_info
.lock
);
390 fjes_hw_register_buff_addr(hw
, epidx
, buf_pair
);
391 mutex_unlock(&hw
->hw_info
.lock
);
399 adapter
->force_reset
= true;
403 hw
->ep_shm_info
[epidx
].ep_stats
404 .com_regist_buf_exec
+= 1;
411 static void fjes_free_resources(struct fjes_adapter
*adapter
)
413 struct net_device
*netdev
= adapter
->netdev
;
414 struct fjes_device_command_param param
;
415 struct ep_share_mem_info
*buf_pair
;
416 struct fjes_hw
*hw
= &adapter
->hw
;
417 bool reset_flag
= false;
422 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
423 if (epidx
== hw
->my_epid
)
426 mutex_lock(&hw
->hw_info
.lock
);
427 result
= fjes_hw_unregister_buff_addr(hw
, epidx
);
428 mutex_unlock(&hw
->hw_info
.lock
);
430 hw
->ep_shm_info
[epidx
].ep_stats
.com_unregist_buf_exec
+= 1;
435 buf_pair
= &hw
->ep_shm_info
[epidx
];
437 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
438 fjes_hw_setup_epbuf(&buf_pair
->tx
,
439 netdev
->dev_addr
, netdev
->mtu
);
440 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
442 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
445 if (reset_flag
|| adapter
->force_reset
) {
446 result
= fjes_hw_reset(hw
);
448 adapter
->force_reset
= false;
451 adapter
->open_guard
= true;
453 hw
->hw_info
.buffer_share_bit
= 0;
455 memset((void *)¶m
, 0, sizeof(param
));
457 param
.req_len
= hw
->hw_info
.req_buf_size
;
458 param
.req_start
= __pa(hw
->hw_info
.req_buf
);
459 param
.res_len
= hw
->hw_info
.res_buf_size
;
460 param
.res_start
= __pa(hw
->hw_info
.res_buf
);
461 param
.share_start
= __pa(hw
->hw_info
.share
->ep_status
);
463 fjes_hw_init_command_registers(hw
, ¶m
);
467 static void fjes_tx_stall_task(struct work_struct
*work
)
469 struct fjes_adapter
*adapter
= container_of(work
,
470 struct fjes_adapter
, tx_stall_task
);
471 struct net_device
*netdev
= adapter
->netdev
;
472 struct fjes_hw
*hw
= &adapter
->hw
;
473 int all_queue_available
, sendable
;
474 enum ep_partner_status pstatus
;
475 int max_epid
, my_epid
, epid
;
476 union ep_buffer_info
*info
;
480 dev_trans_start(netdev
)) > FJES_TX_TX_STALL_TIMEOUT
) {
481 netif_wake_queue(netdev
);
485 my_epid
= hw
->my_epid
;
486 max_epid
= hw
->max_epid
;
488 for (i
= 0; i
< 5; i
++) {
489 all_queue_available
= 1;
491 for (epid
= 0; epid
< max_epid
; epid
++) {
495 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
496 sendable
= (pstatus
== EP_PARTNER_SHARED
);
500 info
= adapter
->hw
.ep_shm_info
[epid
].tx
.info
;
502 if (!(info
->v1i
.rx_status
& FJES_RX_MTU_CHANGING_DONE
))
505 if (EP_RING_FULL(info
->v1i
.head
, info
->v1i
.tail
,
506 info
->v1i
.count_max
)) {
507 all_queue_available
= 0;
512 if (all_queue_available
) {
513 netif_wake_queue(netdev
);
518 usleep_range(50, 100);
520 queue_work(adapter
->txrx_wq
, &adapter
->tx_stall_task
);
/* fjes_force_close_task - work handler that force-closes the netdev after
 * an unrecoverable condition.
 * NOTE(review): the body lines that actually close the device are missing
 * from this excerpt — only the container_of/netdev setup is visible.
 */
523 static void fjes_force_close_task(struct work_struct
*work
)
525 struct fjes_adapter
*adapter
= container_of(work
,
526 struct fjes_adapter
, force_close_task
);
527 struct net_device
*netdev
= adapter
->netdev
;
534 static void fjes_raise_intr_rxdata_task(struct work_struct
*work
)
536 struct fjes_adapter
*adapter
= container_of(work
,
537 struct fjes_adapter
, raise_intr_rxdata_task
);
538 struct fjes_hw
*hw
= &adapter
->hw
;
539 enum ep_partner_status pstatus
;
540 int max_epid
, my_epid
, epid
;
542 my_epid
= hw
->my_epid
;
543 max_epid
= hw
->max_epid
;
545 for (epid
= 0; epid
< max_epid
; epid
++)
546 hw
->ep_shm_info
[epid
].tx_status_work
= 0;
548 for (epid
= 0; epid
< max_epid
; epid
++) {
552 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
553 if (pstatus
== EP_PARTNER_SHARED
) {
554 hw
->ep_shm_info
[epid
].tx_status_work
=
555 hw
->ep_shm_info
[epid
].tx
.info
->v1i
.tx_status
;
557 if (hw
->ep_shm_info
[epid
].tx_status_work
==
558 FJES_TX_DELAY_SEND_PENDING
) {
559 hw
->ep_shm_info
[epid
].tx
.info
->v1i
.tx_status
=
560 FJES_TX_DELAY_SEND_NONE
;
565 for (epid
= 0; epid
< max_epid
; epid
++) {
569 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
570 if ((hw
->ep_shm_info
[epid
].tx_status_work
==
571 FJES_TX_DELAY_SEND_PENDING
) &&
572 (pstatus
== EP_PARTNER_SHARED
) &&
573 !(hw
->ep_shm_info
[epid
].rx
.info
->v1i
.rx_status
&
574 FJES_RX_POLL_WORK
)) {
575 fjes_hw_raise_interrupt(hw
, epid
,
576 REG_ICTL_MASK_RX_DATA
);
577 hw
->ep_shm_info
[epid
].ep_stats
.send_intr_rx
+= 1;
581 usleep_range(500, 1000);
/* fjes_tx_send - copy one frame into @dest's shared-memory TX buffer and
 * mark delayed-send pending so fjes_raise_intr_rxdata_task() will raise
 * the RX-data interrupt toward the peer.
 * NOTE(review): the retval declaration, the remaining arguments of
 * fjes_hw_epbuf_tx_pkt_send() and the return statement are missing from
 * this excerpt.
 */
584 static int fjes_tx_send(struct fjes_adapter
*adapter
, int dest
,
585 void *data
, size_t len
)
589 retval
= fjes_hw_epbuf_tx_pkt_send(&adapter
->hw
.ep_shm_info
[dest
].tx
,
/* Flag the buffer so the deferred task knows an interrupt is owed. */
594 adapter
->hw
.ep_shm_info
[dest
].tx
.info
->v1i
.tx_status
=
595 FJES_TX_DELAY_SEND_PENDING
;
596 if (!work_pending(&adapter
->raise_intr_rxdata_task
))
597 queue_work(adapter
->txrx_wq
,
598 &adapter
->raise_intr_rxdata_task
);
605 fjes_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
607 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
608 struct fjes_hw
*hw
= &adapter
->hw
;
610 int max_epid
, my_epid
, dest_epid
;
611 enum ep_partner_status pstatus
;
612 struct netdev_queue
*cur_queue
;
613 char shortpkt
[VLAN_ETH_HLEN
];
624 cur_queue
= netdev_get_tx_queue(netdev
, queue_no
);
626 eth
= (struct ethhdr
*)skb
->data
;
627 my_epid
= hw
->my_epid
;
629 vlan
= (vlan_get_tag(skb
, &vlan_id
) == 0) ? true : false;
634 if (is_multicast_ether_addr(eth
->h_dest
)) {
636 max_epid
= hw
->max_epid
;
638 } else if (is_local_ether_addr(eth
->h_dest
)) {
639 dest_epid
= eth
->h_dest
[ETH_ALEN
- 1];
640 max_epid
= dest_epid
+ 1;
642 if ((eth
->h_dest
[0] == 0x02) &&
643 (0x00 == (eth
->h_dest
[1] | eth
->h_dest
[2] |
644 eth
->h_dest
[3] | eth
->h_dest
[4])) &&
645 (dest_epid
< hw
->max_epid
)) {
652 adapter
->stats64
.tx_packets
+= 1;
653 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
654 adapter
->stats64
.tx_bytes
+= len
;
655 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
662 adapter
->stats64
.tx_packets
+= 1;
663 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
664 adapter
->stats64
.tx_bytes
+= len
;
665 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
668 for (; dest_epid
< max_epid
; dest_epid
++) {
669 if (my_epid
== dest_epid
)
672 pstatus
= fjes_hw_get_partner_ep_status(hw
, dest_epid
);
673 if (pstatus
!= EP_PARTNER_SHARED
) {
675 hw
->ep_shm_info
[dest_epid
].ep_stats
676 .tx_dropped_not_shared
+= 1;
678 } else if (!fjes_hw_check_epbuf_version(
679 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
, 0)) {
680 /* version is NOT 0 */
681 adapter
->stats64
.tx_carrier_errors
+= 1;
682 hw
->ep_shm_info
[dest_epid
].net_stats
683 .tx_carrier_errors
+= 1;
684 hw
->ep_shm_info
[dest_epid
].ep_stats
685 .tx_dropped_ver_mismatch
+= 1;
688 } else if (!fjes_hw_check_mtu(
689 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
,
691 adapter
->stats64
.tx_dropped
+= 1;
692 hw
->ep_shm_info
[dest_epid
].net_stats
.tx_dropped
+= 1;
693 adapter
->stats64
.tx_errors
+= 1;
694 hw
->ep_shm_info
[dest_epid
].net_stats
.tx_errors
+= 1;
695 hw
->ep_shm_info
[dest_epid
].ep_stats
696 .tx_dropped_buf_size_mismatch
+= 1;
700 !fjes_hw_check_vlan_id(
701 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
,
703 hw
->ep_shm_info
[dest_epid
].ep_stats
704 .tx_dropped_vlanid_mismatch
+= 1;
707 if (len
< VLAN_ETH_HLEN
) {
708 memset(shortpkt
, 0, VLAN_ETH_HLEN
);
709 memcpy(shortpkt
, skb
->data
, skb
->len
);
714 if (adapter
->tx_retry_count
== 0) {
715 adapter
->tx_start_jiffies
= jiffies
;
716 adapter
->tx_retry_count
= 1;
718 adapter
->tx_retry_count
++;
721 if (fjes_tx_send(adapter
, dest_epid
, data
, len
)) {
726 (long)adapter
->tx_start_jiffies
) >=
727 FJES_TX_RETRY_TIMEOUT
) {
728 adapter
->stats64
.tx_fifo_errors
+= 1;
729 hw
->ep_shm_info
[dest_epid
].net_stats
730 .tx_fifo_errors
+= 1;
731 adapter
->stats64
.tx_errors
+= 1;
732 hw
->ep_shm_info
[dest_epid
].net_stats
737 netif_trans_update(netdev
);
738 hw
->ep_shm_info
[dest_epid
].ep_stats
739 .tx_buffer_full
+= 1;
740 netif_tx_stop_queue(cur_queue
);
742 if (!work_pending(&adapter
->tx_stall_task
))
743 queue_work(adapter
->txrx_wq
,
744 &adapter
->tx_stall_task
);
746 ret
= NETDEV_TX_BUSY
;
750 adapter
->stats64
.tx_packets
+= 1;
751 hw
->ep_shm_info
[dest_epid
].net_stats
753 adapter
->stats64
.tx_bytes
+= len
;
754 hw
->ep_shm_info
[dest_epid
].net_stats
758 adapter
->tx_retry_count
= 0;
764 if (ret
== NETDEV_TX_OK
) {
767 adapter
->stats64
.tx_packets
+= 1;
768 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
769 adapter
->stats64
.tx_bytes
+= 1;
770 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
/* fjes_tx_retry - .ndo_tx_timeout hook.
 *
 * On a transmit timeout, wake TX queue 0 again so transmission is
 * retried.
 */
static void fjes_tx_retry(struct net_device *netdev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(netdev, 0));
}
785 fjes_get_stats64(struct net_device
*netdev
, struct rtnl_link_stats64
*stats
)
787 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
789 memcpy(stats
, &adapter
->stats64
, sizeof(struct rtnl_link_stats64
));
792 static int fjes_change_mtu(struct net_device
*netdev
, int new_mtu
)
794 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
795 bool running
= netif_running(netdev
);
796 struct fjes_hw
*hw
= &adapter
->hw
;
801 for (idx
= 0; fjes_support_mtu
[idx
] != 0; idx
++) {
802 if (new_mtu
<= fjes_support_mtu
[idx
]) {
803 new_mtu
= fjes_support_mtu
[idx
];
804 if (new_mtu
== netdev
->mtu
)
816 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
817 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
818 if (epidx
== hw
->my_epid
)
820 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
&=
821 ~FJES_RX_MTU_CHANGING_DONE
;
823 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
825 netif_tx_stop_all_queues(netdev
);
826 netif_carrier_off(netdev
);
827 cancel_work_sync(&adapter
->tx_stall_task
);
828 napi_disable(&adapter
->napi
);
832 netif_tx_stop_all_queues(netdev
);
835 netdev
->mtu
= new_mtu
;
838 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
839 if (epidx
== hw
->my_epid
)
842 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
843 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
847 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
|=
848 FJES_RX_MTU_CHANGING_DONE
;
849 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
852 netif_tx_wake_all_queues(netdev
);
853 netif_carrier_on(netdev
);
854 napi_enable(&adapter
->napi
);
855 napi_schedule(&adapter
->napi
);
/* fjes_vlan_rx_add_vid - .ndo_vlan_rx_add_vid hook: record @vid in the
 * TX shared-memory buffer of every peer EP (skipping our own EPID) that
 * does not already carry it; -ENOSPC when the VLAN table is full.
 * NOTE(review): the declarations of ret/epid and loop braces are missing
 * from this excerpt.
 */
861 static int fjes_vlan_rx_add_vid(struct net_device
*netdev
,
862 __be16 proto
, u16 vid
)
864 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
868 for (epid
= 0; epid
< adapter
->hw
.max_epid
; epid
++) {
869 if (epid
== adapter
->hw
.my_epid
)
872 if (!fjes_hw_check_vlan_id(
873 &adapter
->hw
.ep_shm_info
[epid
].tx
, vid
))
874 ret
= fjes_hw_set_vlan_id(
875 &adapter
->hw
.ep_shm_info
[epid
].tx
, vid
);
878 return ret
? 0 : -ENOSPC
;
/* fjes_vlan_rx_kill_vid - .ndo_vlan_rx_kill_vid hook: remove @vid from
 * every peer EP's TX shared-memory buffer (skipping our own EPID).
 * NOTE(review): the epid declaration and return statement are missing
 * from this excerpt.
 */
881 static int fjes_vlan_rx_kill_vid(struct net_device
*netdev
,
882 __be16 proto
, u16 vid
)
884 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
887 for (epid
= 0; epid
< adapter
->hw
.max_epid
; epid
++) {
888 if (epid
== adapter
->hw
.my_epid
)
891 fjes_hw_del_vlan_id(&adapter
->hw
.ep_shm_info
[epid
].tx
, vid
);
897 static void fjes_txrx_stop_req_irq(struct fjes_adapter
*adapter
,
900 struct fjes_hw
*hw
= &adapter
->hw
;
901 enum ep_partner_status status
;
904 status
= fjes_hw_get_partner_ep_status(hw
, src_epid
);
905 trace_fjes_txrx_stop_req_irq_pre(hw
, src_epid
, status
);
907 case EP_PARTNER_UNSHARE
:
908 case EP_PARTNER_COMPLETE
:
911 case EP_PARTNER_WAITING
:
912 if (src_epid
< hw
->my_epid
) {
913 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
914 hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
|=
915 FJES_RX_STOP_REQ_DONE
;
916 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
918 clear_bit(src_epid
, &hw
->txrx_stop_req_bit
);
919 set_bit(src_epid
, &adapter
->unshare_watch_bitmask
);
921 if (!work_pending(&adapter
->unshare_watch_task
))
922 queue_work(adapter
->control_wq
,
923 &adapter
->unshare_watch_task
);
926 case EP_PARTNER_SHARED
:
927 if (hw
->ep_shm_info
[src_epid
].rx
.info
->v1i
.rx_status
&
928 FJES_RX_STOP_REQ_REQUEST
) {
929 set_bit(src_epid
, &hw
->epstop_req_bit
);
930 if (!work_pending(&hw
->epstop_task
))
931 queue_work(adapter
->control_wq
,
936 trace_fjes_txrx_stop_req_irq_post(hw
, src_epid
);
939 static void fjes_stop_req_irq(struct fjes_adapter
*adapter
, int src_epid
)
941 struct fjes_hw
*hw
= &adapter
->hw
;
942 enum ep_partner_status status
;
945 set_bit(src_epid
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
947 status
= fjes_hw_get_partner_ep_status(hw
, src_epid
);
948 trace_fjes_stop_req_irq_pre(hw
, src_epid
, status
);
950 case EP_PARTNER_WAITING
:
951 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
952 hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
|=
953 FJES_RX_STOP_REQ_DONE
;
954 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
955 clear_bit(src_epid
, &hw
->txrx_stop_req_bit
);
957 case EP_PARTNER_UNSHARE
:
958 case EP_PARTNER_COMPLETE
:
960 set_bit(src_epid
, &adapter
->unshare_watch_bitmask
);
961 if (!work_pending(&adapter
->unshare_watch_task
))
962 queue_work(adapter
->control_wq
,
963 &adapter
->unshare_watch_task
);
965 case EP_PARTNER_SHARED
:
966 set_bit(src_epid
, &hw
->epstop_req_bit
);
968 if (!work_pending(&hw
->epstop_task
))
969 queue_work(adapter
->control_wq
, &hw
->epstop_task
);
972 trace_fjes_stop_req_irq_post(hw
, src_epid
);
/* fjes_update_zone_irq - INFO_UPDATE interrupt: defer zone-information
 * refresh to hw->update_zone_task on the control workqueue (queued only
 * if not already pending).
 * NOTE(review): the second parameter (source EPID) is missing from this
 * excerpt.
 */
975 static void fjes_update_zone_irq(struct fjes_adapter
*adapter
,
978 struct fjes_hw
*hw
= &adapter
->hw
;
980 if (!work_pending(&hw
->update_zone_task
))
981 queue_work(adapter
->control_wq
, &hw
->update_zone_task
);
984 static irqreturn_t
fjes_intr(int irq
, void *data
)
986 struct fjes_adapter
*adapter
= data
;
987 struct fjes_hw
*hw
= &adapter
->hw
;
991 icr
= fjes_hw_capture_interrupt_status(hw
);
993 if (icr
& REG_IS_MASK_IS_ASSERT
) {
994 if (icr
& REG_ICTL_MASK_RX_DATA
) {
995 fjes_rx_irq(adapter
, icr
& REG_IS_MASK_EPID
);
996 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1000 if (icr
& REG_ICTL_MASK_DEV_STOP_REQ
) {
1001 fjes_stop_req_irq(adapter
, icr
& REG_IS_MASK_EPID
);
1002 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1003 .recv_intr_stop
+= 1;
1006 if (icr
& REG_ICTL_MASK_TXRX_STOP_REQ
) {
1007 fjes_txrx_stop_req_irq(adapter
, icr
& REG_IS_MASK_EPID
);
1008 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1009 .recv_intr_unshare
+= 1;
1012 if (icr
& REG_ICTL_MASK_TXRX_STOP_DONE
)
1013 fjes_hw_set_irqmask(hw
,
1014 REG_ICTL_MASK_TXRX_STOP_DONE
, true);
1016 if (icr
& REG_ICTL_MASK_INFO_UPDATE
) {
1017 fjes_update_zone_irq(adapter
, icr
& REG_IS_MASK_EPID
);
1018 hw
->ep_shm_info
[icr
& REG_IS_MASK_EPID
].ep_stats
1019 .recv_intr_zoneupdate
+= 1;
1030 static int fjes_rxframe_search_exist(struct fjes_adapter
*adapter
,
1033 struct fjes_hw
*hw
= &adapter
->hw
;
1034 enum ep_partner_status pstatus
;
1035 int max_epid
, cur_epid
;
1038 max_epid
= hw
->max_epid
;
1039 start_epid
= (start_epid
+ 1 + max_epid
) % max_epid
;
1041 for (i
= 0; i
< max_epid
; i
++) {
1042 cur_epid
= (start_epid
+ i
) % max_epid
;
1043 if (cur_epid
== hw
->my_epid
)
1046 pstatus
= fjes_hw_get_partner_ep_status(hw
, cur_epid
);
1047 if (pstatus
== EP_PARTNER_SHARED
) {
1048 if (!fjes_hw_epbuf_rx_is_empty(
1049 &hw
->ep_shm_info
[cur_epid
].rx
))
/* fjes_rxframe_get - find the next EP with a pending RX frame (updating
 * *cur_epid) and return the address/size of its current packet in the
 * shared-memory RX buffer.
 * NOTE(review): the frame-pointer assignment, the no-frame path and the
 * return statement are missing from this excerpt.
 */
1056 static void *fjes_rxframe_get(struct fjes_adapter
*adapter
, size_t *psize
,
1061 *cur_epid
= fjes_rxframe_search_exist(adapter
, *cur_epid
);
1066 fjes_hw_epbuf_rx_curpkt_get_addr(
1067 &adapter
->hw
.ep_shm_info
[*cur_epid
].rx
, psize
);
/* fjes_rxframe_release - drop the current packet from @cur_epid's
 * shared-memory RX buffer, advancing it past the consumed frame.
 */
1072 static void fjes_rxframe_release(struct fjes_adapter
*adapter
, int cur_epid
)
1074 fjes_hw_epbuf_rx_curpkt_drop(&adapter
->hw
.ep_shm_info
[cur_epid
].rx
);
1077 static void fjes_rx_irq(struct fjes_adapter
*adapter
, int src_epid
)
1079 struct fjes_hw
*hw
= &adapter
->hw
;
1081 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_RX_DATA
, true);
1083 adapter
->unset_rx_last
= true;
1084 napi_schedule(&adapter
->napi
);
1087 static int fjes_poll(struct napi_struct
*napi
, int budget
)
1089 struct fjes_adapter
*adapter
=
1090 container_of(napi
, struct fjes_adapter
, napi
);
1091 struct net_device
*netdev
= napi
->dev
;
1092 struct fjes_hw
*hw
= &adapter
->hw
;
1093 struct sk_buff
*skb
;
1100 spin_lock(&hw
->rx_status_lock
);
1101 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1102 if (epidx
== hw
->my_epid
)
1105 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
1107 adapter
->hw
.ep_shm_info
[epidx
]
1108 .tx
.info
->v1i
.rx_status
|= FJES_RX_POLL_WORK
;
1110 spin_unlock(&hw
->rx_status_lock
);
1112 while (work_done
< budget
) {
1113 prefetch(&adapter
->hw
);
1114 frame
= fjes_rxframe_get(adapter
, &frame_len
, &cur_epid
);
1117 skb
= napi_alloc_skb(napi
, frame_len
);
1119 adapter
->stats64
.rx_dropped
+= 1;
1120 hw
->ep_shm_info
[cur_epid
].net_stats
1122 adapter
->stats64
.rx_errors
+= 1;
1123 hw
->ep_shm_info
[cur_epid
].net_stats
1126 memcpy(skb_put(skb
, frame_len
),
1128 skb
->protocol
= eth_type_trans(skb
, netdev
);
1129 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1131 netif_receive_skb(skb
);
1135 adapter
->stats64
.rx_packets
+= 1;
1136 hw
->ep_shm_info
[cur_epid
].net_stats
1138 adapter
->stats64
.rx_bytes
+= frame_len
;
1139 hw
->ep_shm_info
[cur_epid
].net_stats
1140 .rx_bytes
+= frame_len
;
1142 if (is_multicast_ether_addr(
1143 ((struct ethhdr
*)frame
)->h_dest
)) {
1144 adapter
->stats64
.multicast
+= 1;
1145 hw
->ep_shm_info
[cur_epid
].net_stats
1150 fjes_rxframe_release(adapter
, cur_epid
);
1151 adapter
->unset_rx_last
= true;
1157 if (work_done
< budget
) {
1158 napi_complete_done(napi
, work_done
);
1160 if (adapter
->unset_rx_last
) {
1161 adapter
->rx_last_jiffies
= jiffies
;
1162 adapter
->unset_rx_last
= false;
1165 if (((long)jiffies
- (long)adapter
->rx_last_jiffies
) < 3) {
1166 napi_reschedule(napi
);
1168 spin_lock(&hw
->rx_status_lock
);
1169 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1170 if (epidx
== hw
->my_epid
)
1172 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
1174 adapter
->hw
.ep_shm_info
[epidx
].tx
1175 .info
->v1i
.rx_status
&=
1178 spin_unlock(&hw
->rx_status_lock
);
1180 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_RX_DATA
, false);
1187 /* fjes_probe - Device Initialization Routine */
1188 static int fjes_probe(struct platform_device
*plat_dev
)
1190 struct fjes_adapter
*adapter
;
1191 struct net_device
*netdev
;
1192 struct resource
*res
;
1197 netdev
= alloc_netdev_mq(sizeof(struct fjes_adapter
), "es%d",
1198 NET_NAME_UNKNOWN
, fjes_netdev_setup
,
1204 SET_NETDEV_DEV(netdev
, &plat_dev
->dev
);
1206 dev_set_drvdata(&plat_dev
->dev
, netdev
);
1207 adapter
= netdev_priv(netdev
);
1208 adapter
->netdev
= netdev
;
1209 adapter
->plat_dev
= plat_dev
;
1213 /* setup the private structure */
1214 err
= fjes_sw_init(adapter
);
1216 goto err_free_netdev
;
1218 INIT_WORK(&adapter
->force_close_task
, fjes_force_close_task
);
1219 adapter
->force_reset
= false;
1220 adapter
->open_guard
= false;
1222 adapter
->txrx_wq
= alloc_workqueue(DRV_NAME
"/txrx", WQ_MEM_RECLAIM
, 0);
1223 adapter
->control_wq
= alloc_workqueue(DRV_NAME
"/control",
1226 INIT_WORK(&adapter
->tx_stall_task
, fjes_tx_stall_task
);
1227 INIT_WORK(&adapter
->raise_intr_rxdata_task
,
1228 fjes_raise_intr_rxdata_task
);
1229 INIT_WORK(&adapter
->unshare_watch_task
, fjes_watch_unshare_task
);
1230 adapter
->unshare_watch_bitmask
= 0;
1232 INIT_DELAYED_WORK(&adapter
->interrupt_watch_task
, fjes_irq_watch_task
);
1233 adapter
->interrupt_watch_enable
= false;
1235 res
= platform_get_resource(plat_dev
, IORESOURCE_MEM
, 0);
1236 hw
->hw_res
.start
= res
->start
;
1237 hw
->hw_res
.size
= resource_size(res
);
1238 hw
->hw_res
.irq
= platform_get_irq(plat_dev
, 0);
1239 err
= fjes_hw_init(&adapter
->hw
);
1241 goto err_free_netdev
;
1243 /* setup MAC address (02:00:00:00:00:[epid])*/
1244 netdev
->dev_addr
[0] = 2;
1245 netdev
->dev_addr
[1] = 0;
1246 netdev
->dev_addr
[2] = 0;
1247 netdev
->dev_addr
[3] = 0;
1248 netdev
->dev_addr
[4] = 0;
1249 netdev
->dev_addr
[5] = hw
->my_epid
; /* EPID */
1251 err
= register_netdev(netdev
);
1255 netif_carrier_off(netdev
);
1257 fjes_dbg_adapter_init(adapter
);
1262 fjes_hw_exit(&adapter
->hw
);
1264 free_netdev(netdev
);
1269 /* fjes_remove - Device Removal Routine */
1270 static int fjes_remove(struct platform_device
*plat_dev
)
1272 struct net_device
*netdev
= dev_get_drvdata(&plat_dev
->dev
);
1273 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
1274 struct fjes_hw
*hw
= &adapter
->hw
;
1276 fjes_dbg_adapter_exit(adapter
);
1278 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
1279 cancel_work_sync(&adapter
->unshare_watch_task
);
1280 cancel_work_sync(&adapter
->raise_intr_rxdata_task
);
1281 cancel_work_sync(&adapter
->tx_stall_task
);
1282 if (adapter
->control_wq
)
1283 destroy_workqueue(adapter
->control_wq
);
1284 if (adapter
->txrx_wq
)
1285 destroy_workqueue(adapter
->txrx_wq
);
1287 unregister_netdev(netdev
);
1291 netif_napi_del(&adapter
->napi
);
1293 free_netdev(netdev
);
/* fjes_sw_init - software-state initialization: register the NAPI poll
 * routine (weight 64) on the adapter's net_device.
 * NOTE(review): the return statement is missing from this excerpt.
 */
1298 static int fjes_sw_init(struct fjes_adapter
*adapter
)
1300 struct net_device
*netdev
= adapter
->netdev
;
1302 netif_napi_add(netdev
, &adapter
->napi
, fjes_poll
, 64);
1307 /* fjes_netdev_setup - netdevice initialization routine */
/* Ethernet defaults via ether_setup(), then driver-specific ops, ethtool
 * hooks, MTU defaults/limits (from fjes_support_mtu[]), broadcast flag,
 * and checksum/VLAN-filter feature bits.
 */
1308 static void fjes_netdev_setup(struct net_device
*netdev
)
1310 ether_setup(netdev
);
1312 netdev
->watchdog_timeo
= FJES_TX_RETRY_INTERVAL
;
1313 netdev
->netdev_ops
= &fjes_netdev_ops
;
1314 fjes_set_ethtool_ops(netdev
);
/* Default MTU is the largest supported size; bounds come from the table. */
1315 netdev
->mtu
= fjes_support_mtu
[3];
1316 netdev
->min_mtu
= fjes_support_mtu
[0];
1317 netdev
->max_mtu
= fjes_support_mtu
[3];
1318 netdev
->flags
|= IFF_BROADCAST
;
1319 netdev
->features
|= NETIF_F_HW_CSUM
| NETIF_F_HW_VLAN_CTAG_FILTER
;
/* fjes_irq_watch_task - periodic safety net for missed interrupts:
 * invoke fjes_intr() directly (with local IRQs disabled), schedule NAPI
 * if any EP has a pending RX frame, then requeue itself while
 * interrupt_watch_enable remains set.
 * NOTE(review): the matching local_irq_enable() line is missing from
 * this excerpt.
 */
1322 static void fjes_irq_watch_task(struct work_struct
*work
)
1324 struct fjes_adapter
*adapter
= container_of(to_delayed_work(work
),
1325 struct fjes_adapter
, interrupt_watch_task
);
1327 local_irq_disable();
1328 fjes_intr(adapter
->hw
.hw_res
.irq
, adapter
);
1331 if (fjes_rxframe_search_exist(adapter
, 0) >= 0)
1332 napi_schedule(&adapter
->napi
);
1334 if (adapter
->interrupt_watch_enable
) {
1335 if (!delayed_work_pending(&adapter
->interrupt_watch_task
))
1336 queue_delayed_work(adapter
->control_wq
,
1337 &adapter
->interrupt_watch_task
,
1338 FJES_IRQ_WATCH_DELAY
);
1342 static void fjes_watch_unshare_task(struct work_struct
*work
)
1344 struct fjes_adapter
*adapter
=
1345 container_of(work
, struct fjes_adapter
, unshare_watch_task
);
1347 struct net_device
*netdev
= adapter
->netdev
;
1348 struct fjes_hw
*hw
= &adapter
->hw
;
1350 int unshare_watch
, unshare_reserve
;
1351 int max_epid
, my_epid
, epidx
;
1352 int stop_req
, stop_req_done
;
1353 ulong unshare_watch_bitmask
;
1354 unsigned long flags
;
1359 my_epid
= hw
->my_epid
;
1360 max_epid
= hw
->max_epid
;
1362 unshare_watch_bitmask
= adapter
->unshare_watch_bitmask
;
1363 adapter
->unshare_watch_bitmask
= 0;
1365 while ((unshare_watch_bitmask
|| hw
->txrx_stop_req_bit
) &&
1366 (wait_time
< 3000)) {
1367 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1368 if (epidx
== hw
->my_epid
)
1371 is_shared
= fjes_hw_epid_is_shared(hw
->hw_info
.share
,
1374 stop_req
= test_bit(epidx
, &hw
->txrx_stop_req_bit
);
1376 stop_req_done
= hw
->ep_shm_info
[epidx
].rx
.info
->v1i
.rx_status
&
1377 FJES_RX_STOP_REQ_DONE
;
1379 unshare_watch
= test_bit(epidx
, &unshare_watch_bitmask
);
1381 unshare_reserve
= test_bit(epidx
,
1382 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1385 (is_shared
&& (!is_shared
|| !stop_req_done
))) &&
1386 (is_shared
|| !unshare_watch
|| !unshare_reserve
))
1389 mutex_lock(&hw
->hw_info
.lock
);
1390 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1398 &adapter
->force_close_task
)) {
1399 adapter
->force_reset
= true;
1401 &adapter
->force_close_task
);
1405 mutex_unlock(&hw
->hw_info
.lock
);
1406 hw
->ep_shm_info
[epidx
].ep_stats
1407 .com_unregist_buf_exec
+= 1;
1409 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1410 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
1411 netdev
->dev_addr
, netdev
->mtu
);
1412 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
1414 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
1415 clear_bit(epidx
, &unshare_watch_bitmask
);
1417 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1424 if (hw
->hw_info
.buffer_unshare_reserve_bit
) {
1425 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1426 if (epidx
== hw
->my_epid
)
1430 &hw
->hw_info
.buffer_unshare_reserve_bit
)) {
1431 mutex_lock(&hw
->hw_info
.lock
);
1433 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1441 &adapter
->force_close_task
)) {
1442 adapter
->force_reset
= true;
1444 &adapter
->force_close_task
);
1448 mutex_unlock(&hw
->hw_info
.lock
);
1450 hw
->ep_shm_info
[epidx
].ep_stats
1451 .com_unregist_buf_exec
+= 1;
1453 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1454 fjes_hw_setup_epbuf(
1455 &hw
->ep_shm_info
[epidx
].tx
,
1456 netdev
->dev_addr
, netdev
->mtu
);
1457 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1460 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
1461 clear_bit(epidx
, &unshare_watch_bitmask
);
1462 clear_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
1465 if (test_bit(epidx
, &unshare_watch_bitmask
)) {
1466 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1467 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
&=
1468 ~FJES_RX_STOP_REQ_DONE
;
1469 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1476 /* fjes_init_module - Driver Registration Routine */
/* Registers the platform driver first, then the ACPI driver; the ACPI
 * failure path unwinds the platform registration.
 * NOTE(review): the result declaration, error checks and return lines
 * are missing from this excerpt.
 */
1477 static int __init
fjes_init_module(void)
1481 pr_info("%s - version %s - %s\n",
1482 fjes_driver_string
, fjes_driver_version
, fjes_copyright
);
1486 result
= platform_driver_register(&fjes_driver
);
1492 result
= acpi_bus_register_driver(&fjes_acpi_driver
);
1494 goto fail_acpi_driver
;
/* Unwind: ACPI registration failed, drop the platform driver. */
1499 platform_driver_unregister(&fjes_driver
);
1504 module_init(fjes_init_module
);
1506 /* fjes_exit_module - Driver Exit Cleanup Routine */
/* Unregister in reverse order of fjes_init_module(): ACPI driver first,
 * then the platform driver.
 */
1507 static void __exit
fjes_exit_module(void)
1509 acpi_bus_unregister_driver(&fjes_acpi_driver
);
1510 platform_driver_unregister(&fjes_driver
);
1514 module_exit(fjes_exit_module
);