// SPDX-License-Identifier: GPL-2.0
/*
 * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>
/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	1000

#define TBNET_RING_SIZE		256
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	10
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_64K_FRAMES	BIT(2)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)

#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
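/*
 * TBNET_L0_PORT_NUM() picks the port number encoded in the low 6 bits of
 * the XDomain route string; tbnet_generate_mac() below uses it to derive
 * the physical port that is folded into the generated MAC address.
 */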
/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data with the frame
 * @frame_index: running index on the frames
 * @frame_id: ID of the frame to match frames to specific packet
 * @frame_count: how many frames assemble a full packet
 *
 * Each data frame passed to the high-speed DMA ring has this header. If
 * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
 * supported then @frame_id is filled, otherwise it stays %0.
 */
struct thunderbolt_ip_frame_header {
	__le32 frame_size;
	__le16 frame_index;
	__le16 frame_id;
	__le32 frame_count;
};
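/*
 * The frame header above is 12 bytes, so each 4 kB Thunderbolt frame
 * carries up to TBNET_MAX_PAYLOAD_SIZE = 4096 - 12 = 4084 bytes of packet
 * data.
 */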
enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};
struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
#define TBIP_HDR_SN_MASK		GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT		27

struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1
struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};
struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};
/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on Rx path
 * @command_id: ID used for next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote host
 * @local_transmit_path: HopID we are using to send out packets
 * @remote_transmit_path: HopID the other end is using to send packets to us
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @local_transmit_path.
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data
 *		    transfers
 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
 *		     connection
 * @rx_hdr: Copy of the currently processed Rx frame. Used when a
 *	    network packet consists of multiple Thunderbolt frames.
 *	    In host byte order.
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for next Tx packet
 *	      (if %TBNET_MATCH_FRAGS_ID is supported in both ends)
 * @tx_ring: Software ring holding Tx frames
 */
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	int local_transmit_path;
	int remote_transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct work_struct disconnect_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};
/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
			      u8 sequence, const uuid_t *initiator_uuid,
			      const uuid_t *target_uuid,
			      enum thunderbolt_ip_type type, size_t size,
			      u32 command_id)
{
	u32 length_sn;

	/* Length does not include route_hi/lo and length_sn fields */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}
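/*
 * For a packet of @size bytes the length field (bits 5:0) ends up holding
 * (size - 12) / 4, i.e. the number of 32-bit words that follow the route
 * and length_sn fields, while the 2-bit sequence number goes into bits
 * 28:27 as defined by TBIP_HDR_SN_MASK/TBIP_HDR_SN_SHIFT.
 */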
static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}
static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = net->local_transmit_path;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}
static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}
static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}
static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}
static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}
static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}
static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}
static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int ret, retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		ret = tb_xdomain_disable_paths(net->xd,
					       net->local_transmit_path,
					       net->rx_ring.ring->hop,
					       net->remote_transmit_path,
					       net->tx_ring.ring->hop);
		if (ret)
			netdev_warn(net->dev, "failed to disable DMA paths\n");

		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
		net->remote_transmit_path = 0;
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}
static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Make sure the packet is for us */
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->remote_transmit_path = pkg->transmit_path;

			/* If we reached the number of max retries or
			 * previous logout, schedule another round of
			 * login retries
			 */
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			queue_work(system_long_wq, &net->disconnect_work);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}
static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}
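/*
 * prod and cons are free-running counters; because TBNET_RING_SIZE is a
 * power of two they are masked with (TBNET_RING_SIZE - 1) when used as ring
 * indexes, so the difference above stays correct across wraparound.
 */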
static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}
static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}
static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	/* Return buffer to the ring */
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}
static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
	if (ret != net->remote_transmit_path) {
		netdev_err(net->dev, "failed to allocate Rx HopID\n");
		return;
	}

	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */
	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
				      net->rx_ring.ring->hop,
				      net->remote_transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}
static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}
static void tbnet_disconnect_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

	tbnet_tear_down(net, false);
}
static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	/* Should be greater than just header i.e. contains data */
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

	/* In case we're in the middle of packet, validate the frame
	 * header based on first fragment of the packet.
	 */
	if (net->skb && net->rx_hdr.frame_count) {
		/* Check the frame count fits the count field */
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		/* Check the frame identifiers are incremented correctly,
		 * and id is matching.
		 */
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	/* Start of packet, validate the frame header */
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}
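/*
 * Note that frame_count is capped at TBNET_RING_SIZE / 4 above, so a single
 * reassembled network packet never spans more than 64 Rx frames.
 */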
static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

		/* Return some buffers to hardware, one at a time is too
		 * slow so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);
	/* Re-enable the ring interrupt */
	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}
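/*
 * NAPI contract: returning the full budget keeps the poll loop scheduled,
 * while napi_complete_done() plus tb_ring_poll_complete() above re-arm the
 * ring interrupt once the Rx work has been drained.
 */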
static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}
static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;
	int hopid;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (hopid < 0) {
		netdev_err(dev, "failed to allocate Tx HopID\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return hopid;
	}
	net->local_transmit_path = hopid;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME, 0, sof_mask, eof_mask,
				tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}
static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	cancel_work_sync(&net->disconnect_work);
	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;

	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

	/* Data points on the beginning of packet.
	 * Check is the checksum absolute place in the packet.
	 * ipcso will update IP checksum.
	 * tucso will update TCP/UDP checksum.
	 */
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
		return false;
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

	/* First frame was headers, rest of the frames contain data.
	 * Calculate checksum over each frame.
	 */
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	/* Checksum is finally calculated and we don't touch the memory
	 * anymore, so DMA sync the frames now.
	 */
	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
}
static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
}
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

	/* If overall packet is bigger than the frame data size */
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				/* Copy data onto Tx buffer data with
				 * full frame size then break and go to
				 * next frame
				 */
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				unmap = false;
			}

			/* Ensure all fragments have been processed */
			if (frag < skb_shinfo(skb)->nr_frags) {
				/* Map and then unmap quickly */
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

	/* In case the remaining data_len is smaller than a frame */
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	/* We can re-use the buffers */
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}
static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}
static const struct net_device_ops tbnet_netdev_ops = {
	.ndo_open = tbnet_open,
	.ndo_stop = tbnet_stop,
	.ndo_start_xmit = tbnet_start_xmit,
	.ndo_get_stats64 = tbnet_get_stats64,
};
static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

	/* Unicast and locally administered MAC */
	dev->dev_addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	dev->dev_addr[5] = hash & 0xff;
}
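/*
 * The resulting address is locally administered unicast: byte 0 carries the
 * physical port in its high nibble with the locally administered bit (0x02)
 * set, bytes 1-4 come from a Jenkins hash of the local XDomain UUID and
 * byte 5 from a second hash round, so each host derives a stable address
 * from its own UUID.
 */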
static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

	/* ThunderboltIP takes advantage of TSO packets but instead of
	 * segmenting them we just split the packet into Thunderbolt
	 * frames (maximum payload size of each frame is 4084 bytes) and
	 * calculate checksum over the whole packet here.
	 *
	 * The receiving side does the opposite if the host OS supports
	 * LRO, otherwise it needs to split the large packet into MTU
	 * sized smaller packets.
	 *
	 * In order to receive large packets from the networking stack,
	 * we need to announce support for most of the offloading
	 * features too.
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);

	/* MTU range: 68 - 65522 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}
static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}
static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}
static int __maybe_unused tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tbnet_tear_down(net, true);
	}

	tb_unregister_protocol_handler(&net->handler);

	return 0;
}
static int __maybe_unused tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	tb_register_protocol_handler(&net->handler);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}
static const struct dev_pm_ops tbnet_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
};
static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = &tbnet_pm_ops,
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};
static int __init tbnet_init(void)
{
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
	/* Currently only announce support for match frags ID (bit 1). Bit 0
	 * is reserved for full E2E flow control which we do not support at
	 * the moment.
	 */
	tb_property_add_immediate(tbnet_dir, "prtcstns",
				  TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret) {
		tb_property_free_dir(tbnet_dir);
		return ret;
	}

	return tb_register_service_driver(&tbnet_driver);
}
module_init(tbnet_init);
static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);
MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt network driver");
MODULE_LICENSE("GPL v2");