/*
 * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	100

#define TBNET_RING_SIZE		256
#define TBNET_LOCAL_PATH	0xf
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	5
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)

#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))

/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data in the frame
 * @frame_index: running index on the frames
 * @frame_id: ID of the frame to match frames to a specific packet
 * @frame_count: how many frames assemble a full packet
 *
 * Each data frame passed to the high-speed DMA ring has this header. If
 * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
 * supported then @frame_id is filled, otherwise it stays %0.
 */
struct thunderbolt_ip_frame_header {
	u32 frame_size;
	u16 frame_index;
	u16 frame_id;
	u32 frame_count;
};

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
#define TBIP_HDR_SN_MASK		GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT		27

struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};

/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on Rx path
 * @command_id: ID used for next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @transmit_path: HopID the other end needs to use when building the
 *		   opposite side path.
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @transmit_path.
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data
 *		    transfers
 * @rx_hdr: Copy of the currently processed Rx frame. Used when a
 *	    network packet consists of multiple Thunderbolt frames.
 *	    In host byte order.
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for next Tx packet
 *	      (if %TBNET_MATCH_FRAGS_ID is supported by both ends)
 * @tx_ring: Software ring holding Tx frames
 */
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	u32 transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};

/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

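/* Fill in the common ThunderboltIP control packet header */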
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
	enum thunderbolt_ip_type type, size_t size, u32 command_id)
{
	u32 length_sn;

	/* Length does not include route_hi/lo and length_sn fields */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

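/* Send a ThunderboltIP login response carrying our MAC address */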
static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

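/* Send a login request and wait for the remote host's response */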
static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = TBNET_LOCAL_PATH;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}

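/* Reset the connection state and (re)start the login handshake */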
static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

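/* Unmap and release all buffers of a Tx or Rx software ring */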
static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = tbnet_frame_size(tf);
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

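/* Take the connection down: stop the queue, log out if requested and
 * release the DMA rings.
 */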
static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			int ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		if (tb_xdomain_disable_paths(net->xd))
			netdev_warn(net->dev, "failed to disable DMA paths\n");
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

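/* Handle incoming ThunderboltIP control packets (login/logout) */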
static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Make sure the packet is for us */
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->transmit_path = pkg->transmit_path;

			/* If we reached the maximum number of retries or
			 * the other side logged out earlier, schedule
			 * another round of login retries
			 */
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			tbnet_tear_down(net, false);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}

static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

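/* Allocate, DMA map and hand Rx buffers to the hardware ring */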
static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

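/* Reserve the next free Tx buffer from the software ring */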
static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;
	tf->frame.buffer_phy = 0;

	return tf;
}

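/* Called when the hardware has finished with a Tx frame */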
static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct device *dma_dev = tb_ring_dma_device(ring);
	struct tbnet *net = netdev_priv(tf->dev);

	dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
		       DMA_TO_DEVICE);
	tf->frame.buffer_phy = 0;

	/* Return buffer to the ring */
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

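/* Pre-allocate pages for all Tx buffers of the ring */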
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}

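/* Once both ends have logged in, enable the DMA paths and bring the
 * network interface up.
 */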
static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */
	ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
				      net->rx_ring.ring->hop,
				      net->transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
}

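/* Send login requests until the remote host answers or the retry
 * limit is hit.
 */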
static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

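/* Validate a received frame before it is folded into an skb */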
static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	/* Should be greater than just header i.e. contains data */
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

	/* In case we're in the middle of a packet, validate the frame
	 * header based on the first fragment of the packet.
	 */
	if (net->skb && net->rx_hdr.frame_count) {
		/* Check the frame count matches the first fragment */
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		/* Check the frame index is incremented correctly and the
		 * frame ID matches.
		 */
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	/* Start of packet, validate the frame header */
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

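/* NAPI poll: assemble received Thunderbolt frames into network packets */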
static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

		/* Return some buffers to hardware, one at a time is too
		 * slow so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);
	/* Re-enable the ring interrupt */
	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}

static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}

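/* Allocate the Tx/Rx DMA rings and kick off the login handshake */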
static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask,
				eof_mask, tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}

static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}

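/* DMA map a single Tx frame for the hardware */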
static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
				DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr))
		return false;

	tf->frame.buffer_phy = dma_addr;
	return true;
}

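/* Finalize checksums of an outgoing packet and DMA map its frames */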
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate checksum so we just update the
		 * total frame count and map the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			if (!tbnet_xmit_map(dma_dev, frames[i]))
				goto err_unmap;
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

	/* Data points to the beginning of the packet. Find the absolute
	 * offset of the checksum within the packet.
	 * ipcso will update the IP checksum.
	 * tucso will update the TCP/UDP checksum.
	 */
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
		return false;
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

	/* First frame was headers, rest of the frames contain data.
	 * Calculate checksum over each frame.
	 */
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	/* Checksum is finally calculated and we don't touch the memory
	 * anymore, so DMA map the frames now.
	 */
	for (i = 0; i < frame_count; i++) {
		if (!tbnet_xmit_map(dma_dev, frames[i]))
			goto err_unmap;
	}

	return true;

err_unmap:
	while (i--)
		dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
			       tbnet_frame_size(frames[i]), DMA_TO_DEVICE);

	return false;
}

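/* Map a paged skb fragment and return a pointer to its data */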
static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
}

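/* Split the skb into ThunderboltIP frames and queue them to the Tx ring */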
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

	/* If overall packet is bigger than the frame data size */
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				/* Copy data onto Tx buffer data with
				 * full frame size then break and go to
				 * next frame
				 */
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				unmap = false;
			}

			/* Ensure all fragments have been processed */
			if (frag < skb_shinfo(skb)->nr_frags) {
				/* Map and then unmap quickly */
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

	/* In case the remaining data_len is smaller than a frame */
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	/* We can re-use the buffers */
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}

static const struct net_device_ops tbnet_netdev_ops = {
	.ndo_open = tbnet_open,
	.ndo_stop = tbnet_stop,
	.ndo_start_xmit = tbnet_start_xmit,
	.ndo_get_stats64 = tbnet_get_stats64,
};

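/* Derive a stable, locally administered MAC address from the local
 * XDomain UUID and the physical port number.
 */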
static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

	/* Unicast and locally administered MAC */
	dev->dev_addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	dev->dev_addr[5] = hash & 0xff;
}

static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

	/* ThunderboltIP takes advantage of TSO packets but instead of
	 * segmenting them we just split the packet into Thunderbolt
	 * frames (maximum payload size of each frame is 4084 bytes) and
	 * calculate checksum over the whole packet here.
	 *
	 * The receiving side does the opposite if the host OS supports
	 * LRO, otherwise it needs to split the large packet into MTU
	 * sized smaller packets.
	 *
	 * In order to receive large packets from the networking stack,
	 * we need to announce support for most of the offloading
	 * features here.
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);

	/* MTU range: 68 - 65522 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}

static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}

static int __maybe_unused tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);
	}

	return 0;
}

static int __maybe_unused tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}

static const struct dev_pm_ops tbnet_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
};

static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);

static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = &tbnet_pm_ops,
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};

static int __init tbnet_init(void)
{
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
	tb_property_add_immediate(tbnet_dir, "prtcstns",
				  TBNET_MATCH_FRAGS_ID);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret) {
		tb_property_free_dir(tbnet_dir);
		return ret;
	}

	return tb_register_service_driver(&tbnet_driver);
}
module_init(tbnet_init);

static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);

MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt network driver");
MODULE_LICENSE("GPL v2");