// SPDX-License-Identifier: GPL-2.0
/*
 * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	1000

#define TBNET_RING_SIZE		256
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	10
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_64K_FRAMES	BIT(2)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)

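/*
 * The lowest six bits of an XDomain route string identify the port on
 * the host router the cable is plugged into; tbnet_generate_mac() uses
 * this to derive a stable, per-port MAC address.
 */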
#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))

/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data in the frame
 * @frame_index: running index of the frames within a packet
 * @frame_id: ID used to match frames to a specific packet
 * @frame_count: how many frames make up a full packet
 *
 * Each data frame passed to the high-speed DMA ring has this header. If
 * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
 * supported then @frame_id is filled, otherwise it stays %0.
 */
struct thunderbolt_ip_frame_header {
	u32 frame_size;
	u16 frame_index;
	u16 frame_id;
	u32 frame_count;
};
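
/*
 * The frame header is little-endian on the wire: the Tx path fills it
 * with cpu_to_le32()/cpu_to_le16() and the Rx path reads it back with
 * the matching le*_to_cpu() helpers. @rx_hdr in struct tbnet below
 * keeps a host byte order copy of the header being reassembled.
 */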

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK	GENMASK(5, 0)
#define TBIP_HDR_SN_MASK	GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT	27

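/*
 * Worked example of the length_sn encoding (see tbnet_fill_header()):
 * struct thunderbolt_ip_login below is 92 bytes, so its length field is
 * (92 - 3 * 4) / 4 = 20 dwords (the route and length_sn dwords are not
 * counted), and the 2-bit sequence number lands in bits 28:27.
 */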
struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};
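
/*
 * @prod and @cons are free-running counters: slots are addressed by
 * masking with TBNET_RING_SIZE - 1 (so the ring size must stay a power
 * of two) and "prod - cons" gives the number of available buffers even
 * after the counters wrap around.
 */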

/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on Rx path
 * @command_id: ID used for next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @local_transmit_path: HopID we are using to send out packets
 * @remote_transmit_path: HopID the other end is using to send packets to us
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @remote_transmit_path
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data
 *		    transfers
 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
 *		     connection
 * @rx_hdr: Copy of the currently processed Rx frame. Used when a
 *	    network packet consists of multiple Thunderbolt frames.
 *	    In host byte order.
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for next Tx packet
 *	      (if %TBNET_MATCH_FRAGS_ID is supported by both ends)
 * @tx_ring: Software ring holding Tx frames
 */
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	int local_transmit_path;
	int remote_transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct work_struct disconnect_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};

/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
	enum thunderbolt_ip_type type, size_t size, u32 command_id)
{
	u32 length_sn;

	/* Length does not include route_hi/lo and length_sn fields */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = net->local_transmit_path;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}
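
/*
 * Connection setup in a nutshell: both ends keep sending TBIP_LOGIN
 * (carrying the HopID they transmit on) from login_work until they get
 * a response, and answer peer logins with TBIP_LOGIN_RESPONSE. Once a
 * side has both sent and received a login, connected_work enables the
 * DMA paths. TBIP_LOGOUT/TBIP_STATUS tear the connection down again.
 */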

static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int ret, retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		ret = tb_xdomain_disable_paths(net->xd,
					       net->local_transmit_path,
					       net->rx_ring.ring->hop,
					       net->remote_transmit_path,
					       net->tx_ring.ring->hop);
		if (ret)
			netdev_warn(net->dev, "failed to disable DMA paths\n");

		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
		net->remote_transmit_path = 0;
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Make sure the packet is for us */
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->remote_transmit_path = pkg->transmit_path;

			/* If we reached the maximum number of retries or
			 * the other end logged out previously, schedule
			 * another round of login retries.
			 */
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			queue_work(system_long_wq, &net->disconnect_work);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}
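
/*
 * Note the return convention above: returning 1 tells the Thunderbolt
 * control channel that the packet was consumed by this handler, while
 * 0 leaves it for other registered protocol handlers.
 */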

static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}

static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	/* Return buffer to the ring */
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}
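
/*
 * All Tx buffers are allocated and DMA-mapped once above and then
 * recycled for the lifetime of the connection: tbnet_get_tx_buffer()
 * syncs a buffer for CPU access, the Tx path fills it, and
 * tbnet_xmit_csum_and_map() syncs it back to the device before it is
 * queued with tb_ring_tx().
 */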

static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
	if (ret != net->remote_transmit_path) {
		netdev_err(net->dev, "failed to allocate Rx HopID\n");
		return;
	}

	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */
	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
				      net->rx_ring.ring->hop,
				      net->remote_transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}

static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

static void tbnet_disconnect_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

	tbnet_tear_down(net, false);
}

static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	/* Should be greater than just header i.e. contains data */
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

	/* In case we're in the middle of a packet, validate the frame
	 * header based on the first fragment of the packet.
	 */
	if (net->skb && net->rx_hdr.frame_count) {
		/* Check the frame count fits the count field */
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		/* Check that the frame index increments correctly and
		 * that the frame ID matches.
		 */
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	/* Start of packet, validate the frame header */
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

		/* Returning buffers to the hardware one at a time is too
		 * slow, so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);
	/* Re-enable the ring interrupt */
	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}
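
/*
 * tbnet_poll() follows the usual NAPI contract: it returns the full
 * budget while frames may still be pending (so the core keeps polling),
 * and otherwise calls napi_complete_done() and re-enables the ring
 * interrupt via tb_ring_poll_complete().
 */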

static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}

static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;
	int hopid;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (hopid < 0) {
		netdev_err(dev, "failed to allocate Tx HopID\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return hopid;
	}
	net->local_transmit_path = hopid;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME, 0, sof_mask, eof_mask,
				tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_xdomain_release_out_hopid(xd, hopid);
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}
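
/*
 * The sof/eof masks tell the NHI which PDF values delimit a frame: each
 * ThunderboltIP frame starts with PDF TBIP_PDF_FRAME_START and ends
 * with TBIP_PDF_FRAME_END, which is how the Rx ring recognizes frame
 * boundaries in RING_FLAG_FRAME mode.
 */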

static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	cancel_work_sync(&net->disconnect_work);
	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;

	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}

static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

	/* Data points to the beginning of the packet. Find the absolute
	 * location of the checksum fields within the copied frame data:
	 * ipcso updates the IP header checksum and tucso the TCP/UDP
	 * checksum.
	 */
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

	/* First frame was headers, rest of the frames contain data.
	 * Calculate checksum over each frame.
	 */
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	/* Checksum is finally calculated and we don't touch the memory
	 * anymore, so DMA sync the frames now.
	 */
	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
}
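
/*
 * Since Thunderbolt has no checksum offload engine, the driver accepts
 * CHECKSUM_PARTIAL packets from the stack (it advertises NETIF_F_IP_CSUM
 * and NETIF_F_IPV6_CSUM) and finalizes the checksum in software above,
 * over the already-copied frame data, just before handing the frames to
 * the DMA ring.
 */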

static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
}

static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

	/* If overall packet is bigger than the frame data size */
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				/* Copy data onto the Tx buffer up to the
				 * full frame size, then break and go to
				 * the next frame.
				 */
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				unmap = false;
			}

			/* Ensure all fragments have been processed */
			if (frag < skb_shinfo(skb)->nr_frags) {
				/* Map and then unmap quickly */
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

	/* In case the remaining data_len is smaller than a frame */
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	/* We can re-use the buffers */
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}
1194 | static const struct net_device_ops tbnet_netdev_ops = { | |
1195 | .ndo_open = tbnet_open, | |
1196 | .ndo_stop = tbnet_stop, | |
1197 | .ndo_start_xmit = tbnet_start_xmit, | |
1198 | .ndo_get_stats64 = tbnet_get_stats64, | |
1199 | }; | |

static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

	/* Unicast and locally administered MAC */
	dev->dev_addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	dev->dev_addr[5] = hash & 0xff;
}
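
/*
 * The generated address is a locally administered unicast MAC: the
 * physical port number sits in the upper nibble of byte 0 together with
 * the locally-administered bit (0x02), and the remaining bytes come
 * from Jenkins hashes of the local XDomain UUID so the address stays
 * stable across reconnects.
 */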

static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

	/* ThunderboltIP takes advantage of TSO packets but instead of
	 * segmenting them we just split the packet into Thunderbolt
	 * frames (maximum payload size of each frame is 4084 bytes) and
	 * calculate checksum over the whole packet here.
	 *
	 * The receiving side does the opposite if the host OS supports
	 * LRO, otherwise it needs to split the large packet into MTU
	 * sized smaller packets.
	 *
	 * In order to receive large packets from the networking stack,
	 * we need to announce support for most of the offloading
	 * features here.
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);

	/* MTU range: 68 - 65522 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}

static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}

static int __maybe_unused tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tbnet_tear_down(net, true);
	}

	tb_unregister_protocol_handler(&net->handler);
	return 0;
}

static int __maybe_unused tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	tb_register_protocol_handler(&net->handler);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}

static const struct dev_pm_ops tbnet_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
};

static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);

static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = &tbnet_pm_ops,
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};

static int __init tbnet_init(void)
{
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
	/* Announce support for match frags ID (bit 1) and 64K frames
	 * (bit 2). Bit 0 is reserved for full E2E flow control which we
	 * do not support at the moment.
	 */
	tb_property_add_immediate(tbnet_dir, "prtcstns",
				  TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret) {
		tb_property_free_dir(tbnet_dir);
		return ret;
	}

	return tb_register_service_driver(&tbnet_driver);
}
module_init(tbnet_init);

static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);

MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt network driver");
MODULE_LICENSE("GPL v2");