Commit | Line | Data |
---|---|---|
7a291083 JBT |
1 | /* |
2 | * linux/drivers/net/ehea/ehea_main.c | |
3 | * | |
4 | * eHEA ethernet device driver for IBM eServer System p | |
5 | * | |
6 | * (C) Copyright IBM Corp. 2006 | |
7 | * | |
8 | * Authors: | |
9 | * Christoph Raisch <raisch@de.ibm.com> | |
10 | * Jan-Bernd Themann <themann@de.ibm.com> | |
11 | * Thomas Klein <tklein@de.ibm.com> | |
12 | * | |
13 | * | |
14 | * This program is free software; you can redistribute it and/or modify | |
15 | * it under the terms of the GNU General Public License as published by | |
16 | * the Free Software Foundation; either version 2, or (at your option) | |
17 | * any later version. | |
18 | * | |
19 | * This program is distributed in the hope that it will be useful, | |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
22 | * GNU General Public License for more details. | |
23 | * | |
24 | * You should have received a copy of the GNU General Public License | |
25 | * along with this program; if not, write to the Free Software | |
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
27 | */ | |
28 | ||
29 | #include <linux/in.h> | |
30 | #include <linux/ip.h> | |
31 | #include <linux/tcp.h> | |
32 | #include <linux/udp.h> | |
33 | #include <linux/if.h> | |
34 | #include <linux/list.h> | |
35 | #include <linux/if_ether.h> | |
36 | #include <net/ip.h> | |
37 | ||
38 | #include "ehea.h" | |
39 | #include "ehea_qmr.h" | |
40 | #include "ehea_phyp.h" | |
41 | ||
42 | ||
43 | MODULE_LICENSE("GPL"); | |
44 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); | |
45 | MODULE_DESCRIPTION("IBM eServer HEA Driver"); | |
46 | MODULE_VERSION(DRV_VERSION); | |
47 | ||
48 | ||
49 | static int msg_level = -1; | |
50 | static int rq1_entries = EHEA_DEF_ENTRIES_RQ1; | |
51 | static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; | |
52 | static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; | |
53 | static int sq_entries = EHEA_DEF_ENTRIES_SQ; | |
54 | ||
55 | module_param(msg_level, int, 0); | |
56 | module_param(rq1_entries, int, 0); | |
57 | module_param(rq2_entries, int, 0); | |
58 | module_param(rq3_entries, int, 0); | |
59 | module_param(sq_entries, int, 0); | |
60 | ||
61 | MODULE_PARM_DESC(msg_level, "msg_level"); | |
62 | MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " | |
63 | "[2^x - 1], x = [6..14]. Default = " | |
64 | __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")"); | |
65 | MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 " | |
66 | "[2^x - 1], x = [6..14]. Default = " | |
67 | __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")"); | |
68 | MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 " | |
69 | "[2^x - 1], x = [6..14]. Default = " | |
70 | __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")"); | |
71 | MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " | |
72 | "[2^x - 1], x = [6..14]. Default = " | |
73 | __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); | |
74 | ||
75 | void ehea_dump(void *adr, int len, char *msg) { | |
76 | int x; | |
77 | unsigned char *deb = adr; | |
78 | for (x = 0; x < len; x += 16) { | |
79 | printk(DRV_NAME "%s adr=%p ofs=%04x %016lx %016lx\n", msg, | |
80 | deb, x, *((u64*)&deb[0]), *((u64*)&deb[8])); | |
81 | deb += 16; | |
82 | } | |
83 | } | |
84 | ||
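/*
 * Gather interface statistics: the per-queue rx_packets counters are summed
 * in software, while the remaining counters are read from the hypervisor
 * through an H_PORT_CB2 query on the logical port.
 */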
85 | static struct net_device_stats *ehea_get_stats(struct net_device *dev) | |
86 | { | |
87 | struct ehea_port *port = netdev_priv(dev); | |
88 | struct net_device_stats *stats = &port->stats; | |
89 | struct hcp_ehea_port_cb2 *cb2; | |
90 | u64 hret, rx_packets; | |
91 | int i; | |
92 | ||
93 | memset(stats, 0, sizeof(*stats)); | |
94 | ||
a1d261c5 | 95 | cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
96 | if (!cb2) { |
97 | ehea_error("no mem for cb2"); | |
98 | goto out; | |
99 | } | |
100 | ||
101 | hret = ehea_h_query_ehea_port(port->adapter->handle, | |
102 | port->logical_port_id, | |
103 | H_PORT_CB2, H_PORT_CB2_ALL, cb2); | |
104 | if (hret != H_SUCCESS) { | |
105 | ehea_error("query_ehea_port failed"); | |
106 | goto out_herr; | |
107 | } | |
108 | ||
109 | if (netif_msg_hw(port)) | |
110 | ehea_dump(cb2, sizeof(*cb2), "net_device_stats"); | |
111 | ||
112 | rx_packets = 0; | |
113 | for (i = 0; i < port->num_def_qps; i++) | |
114 | rx_packets += port->port_res[i].rx_packets; | |
115 | ||
116 | stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp; | |
117 | stats->multicast = cb2->rxmcp; | |
118 | stats->rx_errors = cb2->rxuerr; | |
119 | stats->rx_bytes = cb2->rxo; | |
120 | stats->tx_bytes = cb2->txo; | |
121 | stats->rx_packets = rx_packets; | |
122 | ||
123 | out_herr: | |
124 | kfree(cb2); | |
125 | out: | |
126 | return stats; | |
127 | } | |
128 | ||
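/*
 * Replenish receive queue 1: allocate skbs for slots that have been emptied,
 * walking backwards from the given index, then ring the RQ1 doorbell with
 * the number of WQEs made available.
 */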
129 | static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) | |
130 | { | |
131 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; | |
132 | struct net_device *dev = pr->port->netdev; | |
133 | int max_index_mask = pr->rq1_skba.len - 1; | |
134 | int i; | |
135 | ||
136 | if (!nr_of_wqes) | |
137 | return; | |
138 | ||
139 | for (i = 0; i < nr_of_wqes; i++) { | |
140 | if (!skb_arr_rq1[index]) { | |
141 | skb_arr_rq1[index] = netdev_alloc_skb(dev, | |
142 | EHEA_L_PKT_SIZE); | |
143 | if (!skb_arr_rq1[index]) { | |
144 | ehea_error("%s: no mem for skb/%d wqes filled", | |
145 | dev->name, i); | |
146 | break; | |
147 | } | |
148 | } | |
149 | index--; | |
150 | index &= max_index_mask; | |
151 | } | |
152 | /* Ring doorbell */ | |
153 | ehea_update_rq1a(pr->qp, i); | |
154 | } | |
155 | ||
156 | static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) | |
157 | { | |
158 | int ret = 0; | |
159 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; | |
160 | struct net_device *dev = pr->port->netdev; | |
161 | int i; | |
162 | ||
163 | for (i = 0; i < pr->rq1_skba.len; i++) { | |
164 | skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); | |
165 | if (!skb_arr_rq1[i]) { | |
166 | ehea_error("%s: no mem for skb/%d wqes filled", | |
167 | dev->name, i); | |
168 | ret = -ENOMEM; | |
169 | goto out; | |
170 | } | |
171 | } | |
172 | /* Ring doorbell */ | |
173 | ehea_update_rq1a(pr->qp, nr_rq1a); | |
174 | out: | |
175 | return ret; | |
176 | } | |
177 | ||
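/*
 * Common refill path for RQ2 and RQ3: allocate skbs, build one receive WQE
 * per skb (a single data segment each) and ring the doorbell of the given RQ.
 * Allocation failures are remembered in os_skbs and retried on the next call.
 */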
178 | static int ehea_refill_rq_def(struct ehea_port_res *pr, | |
179 | struct ehea_q_skb_arr *q_skba, int rq_nr, | |
180 | int num_wqes, int wqe_type, int packet_size) | |
181 | { | |
182 | struct net_device *dev = pr->port->netdev; | |
183 | struct ehea_qp *qp = pr->qp; | |
184 | struct sk_buff **skb_arr = q_skba->arr; | |
185 | struct ehea_rwqe *rwqe; | |
186 | int i, index, max_index_mask, fill_wqes; | |
187 | int ret = 0; | |
188 | ||
189 | fill_wqes = q_skba->os_skbs + num_wqes; | |
190 | ||
191 | if (!fill_wqes) | |
192 | return ret; | |
193 | ||
194 | index = q_skba->index; | |
195 | max_index_mask = q_skba->len - 1; | |
196 | for (i = 0; i < fill_wqes; i++) { | |
197 | struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); | |
198 | if (!skb) { | |
199 | ehea_error("%s: no mem for skb/%d wqes filled", | |
200 | dev->name, i); | |
201 | q_skba->os_skbs = fill_wqes - i; | |
202 | ret = -ENOMEM; | |
203 | break; | |
204 | } | |
205 | skb_reserve(skb, NET_IP_ALIGN); | |
206 | ||
207 | skb_arr[index] = skb; | |
208 | ||
209 | rwqe = ehea_get_next_rwqe(qp, rq_nr); | |
210 | rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) | |
211 | | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); | |
212 | rwqe->sg_list[0].l_key = pr->recv_mr.lkey; | |
213 | rwqe->sg_list[0].vaddr = (u64)skb->data; | |
214 | rwqe->sg_list[0].len = packet_size; | |
215 | rwqe->data_segments = 1; | |
216 | ||
217 | index++; | |
218 | index &= max_index_mask; | |
219 | } | |
220 | q_skba->index = index; | |
221 | ||
222 | /* Ring doorbell */ | |
223 | iosync(); | |
224 | if (rq_nr == 2) | |
225 | ehea_update_rq2a(pr->qp, i); | |
226 | else | |
227 | ehea_update_rq3a(pr->qp, i); | |
228 | ||
229 | return ret; | |
230 | } | |
231 | ||
232 | ||
233 | static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) | |
234 | { | |
235 | return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, | |
236 | nr_of_wqes, EHEA_RWQE2_TYPE, | |
237 | EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN); | |
238 | } | |
239 | ||
240 | ||
241 | static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) | |
242 | { | |
243 | return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, | |
244 | nr_of_wqes, EHEA_RWQE3_TYPE, | |
245 | EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN); | |
246 | } | |
247 | ||
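/* Extract the RQ number from the CQE and report whether it signals an error. */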
248 | static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) | |
249 | { | |
250 | *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; | |
251 | if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) | |
252 | return 0; | |
253 | if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && | |
254 | (cqe->header_length == 0)) | |
255 | return 0; | |
256 | return -EINVAL; | |
257 | } | |
258 | ||
259 | static inline void ehea_fill_skb(struct net_device *dev, | |
260 | struct sk_buff *skb, struct ehea_cqe *cqe) | |
261 | { | |
262 | int length = cqe->num_bytes_transfered - 4; /*remove CRC */ | |
263 | ||
264 | skb_put(skb, length); | |
265 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
266 | skb->protocol = eth_type_trans(skb, dev); | |
267 | } | |
268 | ||
269 | static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, | |
270 | int arr_len, | |
271 | struct ehea_cqe *cqe) | |
272 | { | |
273 | int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); | |
274 | struct sk_buff *skb; | |
275 | void *pref; | |
276 | int x; | |
277 | ||
278 | x = skb_index + 1; | |
279 | x &= (arr_len - 1); | |
280 | ||
281 | pref = skb_array[x]; | |
282 | prefetchw(pref); | |
283 | prefetchw(pref + EHEA_CACHE_LINE); | |
284 | ||
285 | pref = (skb_array[x]->data); | |
286 | prefetch(pref); | |
287 | prefetch(pref + EHEA_CACHE_LINE); | |
288 | prefetch(pref + EHEA_CACHE_LINE * 2); | |
289 | prefetch(pref + EHEA_CACHE_LINE * 3); | |
290 | skb = skb_array[skb_index]; | |
291 | skb_array[skb_index] = NULL; | |
292 | return skb; | |
293 | } | |
294 | ||
295 | static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array, | |
296 | int arr_len, int wqe_index) | |
297 | { | |
298 | struct sk_buff *skb; | |
299 | void *pref; | |
300 | int x; | |
301 | ||
302 | x = wqe_index + 1; | |
303 | x &= (arr_len - 1); | |
304 | ||
305 | pref = skb_array[x]; | |
306 | prefetchw(pref); | |
307 | prefetchw(pref + EHEA_CACHE_LINE); | |
308 | ||
309 | pref = (skb_array[x]->data); | |
310 | prefetchw(pref); | |
311 | prefetchw(pref + EHEA_CACHE_LINE); | |
312 | ||
313 | skb = skb_array[wqe_index]; | |
314 | skb_array[wqe_index] = NULL; | |
315 | return skb; | |
316 | } | |
317 | ||
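/*
 * Handle a receive CQE that carries an error: drop the associated skb for
 * RQ2/RQ3 and, on a fatal error, schedule the port reset task.
 */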
318 | static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, | |
319 | struct ehea_cqe *cqe, int *processed_rq2, | |
320 | int *processed_rq3) | |
321 | { | |
322 | struct sk_buff *skb; | |
323 | ||
324 | if (netif_msg_rx_err(pr->port)) { | |
325 | ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr); | |
326 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | |
327 | } | |
328 | ||
329 | if (rq == 2) { | |
330 | *processed_rq2 += 1; | |
331 | skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); | |
332 | dev_kfree_skb(skb); | |
333 | } else if (rq == 3) { | |
334 | *processed_rq3 += 1; | |
335 | skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); | |
336 | dev_kfree_skb(skb); | |
337 | } | |
338 | ||
339 | if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { | |
340 | ehea_error("Critical receive error. Resetting port."); | |
341 | queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task); | |
342 | return 1; | |
343 | } | |
344 | ||
345 | return 0; | |
346 | } | |
347 | ||
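/*
 * Receive poll routine (old-style NAPI using dev->quota and *budget): drain
 * the receive completions up to the quota, hand packets up via
 * netif_receive_skb() or the VLAN path, refill RQ1/RQ2/RQ3 and re-arm the
 * receive CQ once no more work is pending.
 */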
348 | static int ehea_poll(struct net_device *dev, int *budget) | |
349 | { | |
350 | struct ehea_port *port = netdev_priv(dev); | |
351 | struct ehea_port_res *pr = &port->port_res[0]; | |
352 | struct ehea_qp *qp = pr->qp; | |
353 | struct ehea_cqe *cqe; | |
354 | struct sk_buff *skb; | |
355 | struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; | |
356 | struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; | |
357 | struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; | |
358 | int skb_arr_rq1_len = pr->rq1_skba.len; | |
359 | int skb_arr_rq2_len = pr->rq2_skba.len; | |
360 | int skb_arr_rq3_len = pr->rq3_skba.len; | |
361 | int processed, processed_rq1, processed_rq2, processed_rq3; | |
362 | int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset; | |
363 | ||
364 | processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; | |
365 | last_wqe_index = 0; | |
366 | my_quota = min(*budget, dev->quota); | |
367 | my_quota = min(my_quota, EHEA_POLL_MAX_RWQE); | |
368 | ||
369 | /* rq0 is low latency RQ */ | |
370 | cqe = ehea_poll_rq1(qp, &wqe_index); | |
371 | while ((my_quota > 0) && cqe) { | |
372 | ehea_inc_rq1(qp); | |
373 | processed_rq1++; | |
374 | processed++; | |
375 | my_quota--; | |
376 | if (netif_msg_rx_status(port)) | |
377 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | |
378 | ||
379 | last_wqe_index = wqe_index; | |
380 | rmb(); | |
381 | if (!ehea_check_cqe(cqe, &rq)) { | |
382 | if (rq == 1) { /* LL RQ1 */ | |
383 | skb = get_skb_by_index_ll(skb_arr_rq1, | |
384 | skb_arr_rq1_len, | |
385 | wqe_index); | |
386 | if (unlikely(!skb)) { | |
387 | if (netif_msg_rx_err(port)) | |
388 | ehea_error("LL rq1: skb=NULL"); | |
389 | skb = netdev_alloc_skb(dev, | |
390 | EHEA_L_PKT_SIZE); | |
391 | if (!skb) | |
392 | break; | |
393 | } | |
394 | memcpy(skb->data, ((char*)cqe) + 64, | |
395 | cqe->num_bytes_transfered - 4); | |
396 | ehea_fill_skb(dev, skb, cqe); | |
397 | } else if (rq == 2) { /* RQ2 */ | |
398 | skb = get_skb_by_index(skb_arr_rq2, | |
399 | skb_arr_rq2_len, cqe); | |
400 | if (unlikely(!skb)) { | |
401 | if (netif_msg_rx_err(port)) | |
402 | ehea_error("rq2: skb=NULL"); | |
403 | break; | |
404 | } | |
405 | ehea_fill_skb(dev, skb, cqe); | |
406 | processed_rq2++; | |
407 | } else { /* RQ3 */ | |
408 | skb = get_skb_by_index(skb_arr_rq3, | |
409 | skb_arr_rq3_len, cqe); | |
410 | if (unlikely(!skb)) { | |
411 | if (netif_msg_rx_err(port)) | |
412 | ehea_error("rq3: skb=NULL"); | |
413 | break; | |
414 | } | |
415 | ehea_fill_skb(dev, skb, cqe); | |
416 | processed_rq3++; | |
417 | } | |
418 | ||
419 | if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) | |
420 | vlan_hwaccel_receive_skb(skb, port->vgrp, | |
421 | cqe->vlan_tag); | |
422 | else | |
423 | netif_receive_skb(skb); | |
424 | ||
425 | } else { /* Error occurred */ | |
426 | pr->p_state.poll_receive_errors++; | |
427 | port_reset = ehea_treat_poll_error(pr, rq, cqe, | |
428 | &processed_rq2, | |
429 | &processed_rq3); | |
430 | if (port_reset) | |
431 | break; | |
432 | } | |
433 | cqe = ehea_poll_rq1(qp, &wqe_index); | |
434 | } | |
435 | ||
436 | dev->quota -= processed; | |
437 | *budget -= processed; | |
438 | ||
439 | pr->p_state.ehea_poll += 1; | |
440 | pr->rx_packets += processed; | |
441 | ||
442 | ehea_refill_rq1(pr, last_wqe_index, processed_rq1); | |
443 | ehea_refill_rq2(pr, processed_rq2); | |
444 | ehea_refill_rq3(pr, processed_rq3); | |
445 | ||
446 | intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF); | |
447 | ||
448 | if (!cqe || intreq) { | |
449 | netif_rx_complete(dev); | |
450 | ehea_reset_cq_ep(pr->recv_cq); | |
451 | ehea_reset_cq_n1(pr->recv_cq); | |
452 | cqe = hw_qeit_get_valid(&qp->hw_rqueue1); | |
453 | if (!cqe || intreq) | |
454 | return 0; | |
455 | if (!netif_rx_reschedule(dev, my_quota)) | |
456 | return 0; | |
457 | } | |
458 | return 1; | |
459 | } | |
460 | ||
461 | void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr) | |
462 | { | |
463 | struct sk_buff *skb; | |
464 | int index, max_index_mask, i; | |
465 | ||
466 | index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); | |
467 | max_index_mask = pr->sq_skba.len - 1; | |
468 | for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) { | |
469 | skb = pr->sq_skba.arr[index]; | |
470 | if (likely(skb)) { | |
471 | dev_kfree_skb(skb); | |
472 | pr->sq_skba.arr[index] = NULL; | |
473 | } else { | |
474 | ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d", | |
475 | cqe->wr_id, i, index); | |
476 | } | |
477 | index--; | |
478 | index &= max_index_mask; | |
479 | } | |
480 | } | |
481 | ||
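/*
 * Send completion tasklet: process up to MAX_SENDCOMP_QUOTA CQEs, free the
 * skbs of completed SWQE2 work requests, update swqe_avail and wake the
 * netif queue once enough send WQEs are available again.
 */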
482 | #define MAX_SENDCOMP_QUOTA 400 | |
483 | void ehea_send_irq_tasklet(unsigned long data) | |
484 | { | |
485 | struct ehea_port_res *pr = (struct ehea_port_res*)data; | |
486 | struct ehea_cq *send_cq = pr->send_cq; | |
487 | struct ehea_cqe *cqe; | |
488 | int quota = MAX_SENDCOMP_QUOTA; | |
489 | int cqe_counter = 0; | |
490 | int swqe_av = 0; | |
491 | unsigned long flags; | |
492 | ||
493 | do { | |
494 | cqe = ehea_poll_cq(send_cq); | |
495 | if (!cqe) { | |
496 | ehea_reset_cq_ep(send_cq); | |
497 | ehea_reset_cq_n1(send_cq); | |
498 | cqe = ehea_poll_cq(send_cq); | |
499 | if (!cqe) | |
500 | break; | |
501 | } | |
502 | cqe_counter++; | |
503 | rmb(); | |
504 | if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { | |
505 | ehea_error("Send Completion Error: Resetting port"); | |
506 | if (netif_msg_tx_err(pr->port)) | |
507 | ehea_dump(cqe, sizeof(*cqe), "Send CQE"); | |
508 | queue_work(pr->port->adapter->ehea_wq, | |
509 | &pr->port->reset_task); | |
510 | break; | |
511 | } | |
512 | ||
513 | if (netif_msg_tx_done(pr->port)) | |
514 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | |
515 | ||
516 | if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) | |
517 | == EHEA_SWQE2_TYPE)) | |
518 | free_sent_skbs(cqe, pr); | |
519 | ||
520 | swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); | |
521 | quota--; | |
522 | } while (quota > 0); | |
523 | ||
524 | ehea_update_feca(send_cq, cqe_counter); | |
525 | atomic_add(swqe_av, &pr->swqe_avail); | |
526 | ||
527 | spin_lock_irqsave(&pr->netif_queue, flags); | |
528 | if (pr->queue_stopped && (atomic_read(&pr->swqe_avail) | |
529 | >= pr->swqe_refill_th)) { | |
530 | netif_wake_queue(pr->port->netdev); | |
531 | pr->queue_stopped = 0; | |
532 | } | |
533 | spin_unlock_irqrestore(&pr->netif_queue, flags); | |
534 | ||
535 | if (unlikely(cqe)) | |
536 | tasklet_hi_schedule(&pr->send_comp_task); | |
537 | } | |
538 | ||
7d12e780 | 539 | static irqreturn_t ehea_send_irq_handler(int irq, void *param) |
7a291083 JBT |
540 | { |
541 | struct ehea_port_res *pr = param; | |
542 | tasklet_hi_schedule(&pr->send_comp_task); | |
543 | return IRQ_HANDLED; | |
544 | } | |
545 | ||
7d12e780 | 546 | static irqreturn_t ehea_recv_irq_handler(int irq, void *param) |
7a291083 JBT |
547 | { |
548 | struct ehea_port_res *pr = param; | |
549 | struct ehea_port *port = pr->port; | |
550 | netif_rx_schedule(port->netdev); | |
551 | return IRQ_HANDLED; | |
552 | } | |
553 | ||
7d12e780 | 554 | static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) |
7a291083 JBT |
555 | { |
556 | struct ehea_port *port = param; | |
557 | struct ehea_eqe *eqe; | |
558 | u32 qp_token; | |
559 | ||
560 | eqe = ehea_poll_eq(port->qp_eq); | |
bb3a6449 | 561 | |
7a291083 | 562 | while (eqe) { |
7a291083 | 563 | qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); |
bb3a6449 TK |
564 | ehea_error("QP aff_err: entry=0x%lx, token=0x%x", |
565 | eqe->entry, qp_token); | |
566 | eqe = ehea_poll_eq(port->qp_eq); | |
7a291083 JBT |
567 | } |
568 | ||
569 | return IRQ_HANDLED; | |
570 | } | |
571 | ||
572 | static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter, | |
573 | int logical_port) | |
574 | { | |
575 | int i; | |
576 | ||
577 | for (i = 0; i < adapter->num_ports; i++) | |
41b69c70 TK |
578 | if (adapter->port[i]) |
579 | if (adapter->port[i]->logical_port_id == logical_port) | |
580 | return adapter->port[i]; | |
7a291083 JBT |
581 | return NULL; |
582 | } | |
583 | ||
584 | int ehea_sense_port_attr(struct ehea_port *port) | |
585 | { | |
586 | int ret; | |
587 | u64 hret; | |
588 | struct hcp_ehea_port_cb0 *cb0; | |
589 | ||
a1d261c5 TK |
590 | cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */ |
591 | if (!cb0) { /* ehea_neq_tasklet() */ | |
7a291083 JBT |
592 | ehea_error("no mem for cb0"); |
593 | ret = -ENOMEM; | |
594 | goto out; | |
595 | } | |
596 | ||
597 | hret = ehea_h_query_ehea_port(port->adapter->handle, | |
598 | port->logical_port_id, H_PORT_CB0, | |
599 | EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF), | |
600 | cb0); | |
601 | if (hret != H_SUCCESS) { | |
602 | ret = -EIO; | |
603 | goto out_free; | |
604 | } | |
605 | ||
606 | /* MAC address */ | |
607 | port->mac_addr = cb0->port_mac_addr << 16; | |
608 | ||
609 | if (!is_valid_ether_addr((u8*)&port->mac_addr)) { | |
610 | ret = -EADDRNOTAVAIL; | |
611 | goto out_free; | |
612 | } | |
613 | ||
614 | /* Port speed */ | |
615 | switch (cb0->port_speed) { | |
616 | case H_SPEED_10M_H: | |
617 | port->port_speed = EHEA_SPEED_10M; | |
618 | port->full_duplex = 0; | |
619 | break; | |
620 | case H_SPEED_10M_F: | |
621 | port->port_speed = EHEA_SPEED_10M; | |
622 | port->full_duplex = 1; | |
623 | break; | |
624 | case H_SPEED_100M_H: | |
625 | port->port_speed = EHEA_SPEED_100M; | |
626 | port->full_duplex = 0; | |
627 | break; | |
628 | case H_SPEED_100M_F: | |
629 | port->port_speed = EHEA_SPEED_100M; | |
630 | port->full_duplex = 1; | |
631 | break; | |
632 | case H_SPEED_1G_F: | |
633 | port->port_speed = EHEA_SPEED_1G; | |
634 | port->full_duplex = 1; | |
635 | break; | |
636 | case H_SPEED_10G_F: | |
637 | port->port_speed = EHEA_SPEED_10G; | |
638 | port->full_duplex = 1; | |
639 | break; | |
640 | default: | |
641 | port->port_speed = 0; | |
642 | port->full_duplex = 0; | |
643 | break; | |
644 | } | |
645 | ||
e919b593 TK |
646 | port->autoneg = 1; |
647 | ||
7a291083 JBT |
648 | /* Number of default QPs */ |
649 | port->num_def_qps = cb0->num_default_qps; | |
650 | ||
651 | if (!port->num_def_qps) { | |
652 | ret = -EINVAL; | |
653 | goto out_free; | |
654 | } | |
655 | ||
656 | if (port->num_def_qps >= EHEA_NUM_TX_QP) | |
657 | port->num_add_tx_qps = 0; | |
658 | else | |
659 | port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps; | |
660 | ||
661 | ret = 0; | |
662 | out_free: | |
663 | if (ret || netif_msg_probe(port)) | |
664 | ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); | |
665 | kfree(cb0); | |
666 | out: | |
667 | return ret; | |
668 | } | |
669 | ||
670 | int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) | |
671 | { | |
672 | struct hcp_ehea_port_cb4 *cb4; | |
673 | u64 hret; | |
674 | int ret = 0; | |
675 | ||
a1d261c5 | 676 | cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
677 | if (!cb4) { |
678 | ehea_error("no mem for cb4"); | |
679 | ret = -ENOMEM; | |
680 | goto out; | |
681 | } | |
682 | ||
683 | cb4->port_speed = port_speed; | |
684 | ||
685 | netif_carrier_off(port->netdev); | |
686 | ||
687 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | |
688 | port->logical_port_id, | |
689 | H_PORT_CB4, H_PORT_CB4_SPEED, cb4); | |
690 | if (hret == H_SUCCESS) { | |
691 | port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0; | |
692 | ||
693 | hret = ehea_h_query_ehea_port(port->adapter->handle, | |
694 | port->logical_port_id, | |
695 | H_PORT_CB4, H_PORT_CB4_SPEED, | |
696 | cb4); | |
697 | if (hret == H_SUCCESS) { | |
698 | switch (cb4->port_speed) { | |
699 | case H_SPEED_10M_H: | |
700 | port->port_speed = EHEA_SPEED_10M; | |
701 | port->full_duplex = 0; | |
702 | break; | |
703 | case H_SPEED_10M_F: | |
704 | port->port_speed = EHEA_SPEED_10M; | |
705 | port->full_duplex = 1; | |
706 | break; | |
707 | case H_SPEED_100M_H: | |
708 | port->port_speed = EHEA_SPEED_100M; | |
709 | port->full_duplex = 0; | |
710 | break; | |
711 | case H_SPEED_100M_F: | |
712 | port->port_speed = EHEA_SPEED_100M; | |
713 | port->full_duplex = 1; | |
714 | break; | |
715 | case H_SPEED_1G_F: | |
716 | port->port_speed = EHEA_SPEED_1G; | |
717 | port->full_duplex = 1; | |
718 | break; | |
719 | case H_SPEED_10G_F: | |
720 | port->port_speed = EHEA_SPEED_10G; | |
721 | port->full_duplex = 1; | |
722 | break; | |
723 | default: | |
724 | port->port_speed = 0; | |
725 | port->full_duplex = 0; | |
726 | break; | |
727 | } | |
728 | } else { | |
729 | ehea_error("Failed sensing port speed"); | |
730 | ret = -EIO; | |
731 | } | |
732 | } else { | |
733 | if (hret == H_AUTHORITY) { | |
7674a588 | 734 | ehea_info("Hypervisor denied setting port speed"); |
7a291083 JBT |
735 | ret = -EPERM; |
736 | } else { | |
737 | ret = -EIO; | |
738 | ehea_error("Failed setting port speed"); | |
739 | } | |
740 | } | |
741 | netif_carrier_on(port->netdev); | |
742 | kfree(cb4); | |
743 | out: | |
744 | return ret; | |
745 | } | |
746 | ||
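/*
 * Decode one asynchronous event queue entry: port state changes toggle the
 * carrier and queue state; adapter and port malfunctions are reported.
 */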
747 | static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) | |
748 | { | |
749 | int ret; | |
750 | u8 ec; | |
751 | u8 portnum; | |
752 | struct ehea_port *port; | |
753 | ||
754 | ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); | |
755 | portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); | |
756 | port = ehea_get_port(adapter, portnum); | |
757 | ||
758 | switch (ec) { | |
759 | case EHEA_EC_PORTSTATE_CHG: /* port state change */ | |
760 | ||
761 | if (!port) { | |
762 | ehea_error("unknown portnum %x", portnum); | |
763 | break; | |
764 | } | |
765 | ||
766 | if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { | |
767 | if (!netif_carrier_ok(port->netdev)) { | |
1e1675cc | 768 | ret = ehea_sense_port_attr(port); |
7a291083 JBT |
769 | if (ret) { |
770 | ehea_error("failed resensing port " | |
771 | "attributes"); | |
772 | break; | |
773 | } | |
774 | ||
775 | if (netif_msg_link(port)) | |
776 | ehea_info("%s: Logical port up: %dMbps " | |
777 | "%s Duplex", | |
778 | port->netdev->name, | |
779 | port->port_speed, | |
780 | port->full_duplex == | |
781 | 1 ? "Full" : "Half"); | |
782 | ||
783 | netif_carrier_on(port->netdev); | |
784 | netif_wake_queue(port->netdev); | |
785 | } | |
786 | } else | |
787 | if (netif_carrier_ok(port->netdev)) { | |
788 | if (netif_msg_link(port)) | |
789 | ehea_info("%s: Logical port down", | |
790 | port->netdev->name); | |
791 | netif_carrier_off(port->netdev); | |
792 | netif_stop_queue(port->netdev); | |
793 | } | |
794 | ||
795 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { | |
796 | if (netif_msg_link(port)) | |
797 | ehea_info("%s: Physical port up", | |
798 | port->netdev->name); | |
799 | } else { | |
800 | if (netif_msg_link(port)) | |
801 | ehea_info("%s: Physical port down", | |
802 | port->netdev->name); | |
803 | } | |
804 | ||
805 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) | |
806 | ehea_info("External switch port is primary port"); | |
807 | else | |
808 | ehea_info("External switch port is backup port"); | |
809 | ||
810 | break; | |
811 | case EHEA_EC_ADAPTER_MALFUNC: | |
812 | ehea_error("Adapter malfunction"); | |
813 | break; | |
814 | case EHEA_EC_PORT_MALFUNC: | |
815 | ehea_info("Port malfunction: Device: %s", port->netdev->name); | |
816 | netif_carrier_off(port->netdev); | |
817 | netif_stop_queue(port->netdev); | |
818 | break; | |
819 | default: | |
bff0a55f | 820 | ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe); |
7a291083 JBT |
821 | break; |
822 | } | |
823 | } | |
824 | ||
825 | static void ehea_neq_tasklet(unsigned long data) | |
826 | { | |
827 | struct ehea_adapter *adapter = (struct ehea_adapter*)data; | |
828 | struct ehea_eqe *eqe; | |
829 | u64 event_mask; | |
830 | ||
831 | eqe = ehea_poll_eq(adapter->neq); | |
832 | ehea_debug("eqe=%p", eqe); | |
833 | ||
834 | while (eqe) { | |
835 | ehea_debug("*eqe=%lx", eqe->entry); | |
836 | ehea_parse_eqe(adapter, eqe->entry); | |
837 | eqe = ehea_poll_eq(adapter->neq); | |
838 | ehea_debug("next eqe=%p", eqe); | |
839 | } | |
840 | ||
841 | event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) | |
842 | | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1) | |
843 | | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1); | |
844 | ||
845 | ehea_h_reset_events(adapter->handle, | |
846 | adapter->neq->fw_handle, event_mask); | |
847 | } | |
848 | ||
7d12e780 | 849 | static irqreturn_t ehea_interrupt_neq(int irq, void *param) |
7a291083 JBT |
850 | { |
851 | struct ehea_adapter *adapter = param; | |
852 | tasklet_hi_schedule(&adapter->neq_tasklet); | |
853 | return IRQ_HANDLED; | |
854 | } | |
855 | ||
856 | ||
857 | static int ehea_fill_port_res(struct ehea_port_res *pr) | |
858 | { | |
859 | int ret; | |
860 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; | |
861 | ||
862 | ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 | |
863 | - init_attr->act_nr_rwqes_rq2 | |
864 | - init_attr->act_nr_rwqes_rq3 - 1); | |
865 | ||
866 | ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); | |
867 | ||
868 | ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); | |
869 | ||
870 | return ret; | |
871 | } | |
872 | ||
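/*
 * Request the receive, QP-affinity and send interrupts for all port
 * resources via ibmebus; on failure, free whatever was already registered.
 */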
873 | static int ehea_reg_interrupts(struct net_device *dev) | |
874 | { | |
875 | struct ehea_port *port = netdev_priv(dev); | |
876 | struct ehea_port_res *pr; | |
877 | int i, ret; | |
878 | ||
879 | for (i = 0; i < port->num_def_qps; i++) { | |
880 | pr = &port->port_res[i]; | |
881 | snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1 | |
882 | , "%s-recv%d", dev->name, i); | |
883 | ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1, | |
884 | ehea_recv_irq_handler, | |
885 | SA_INTERRUPT, pr->int_recv_name, pr); | |
886 | if (ret) { | |
887 | ehea_error("failed registering irq for ehea_recv_int:" | |
888 | "port_res_nr:%d, ist=%X", i, | |
889 | pr->recv_eq->attr.ist1); | |
890 | goto out_free_seq; | |
891 | } | |
892 | if (netif_msg_ifup(port)) | |
893 | ehea_info("irq_handle 0x%X for funct ehea_recv_int %d " | |
894 | "registered", pr->recv_eq->attr.ist1, i); | |
895 | } | |
896 | ||
897 | snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", | |
898 | dev->name); | |
899 | ||
900 | ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1, | |
901 | ehea_qp_aff_irq_handler, | |
902 | SA_INTERRUPT, port->int_aff_name, port); | |
903 | if (ret) { | |
904 | ehea_error("failed registering irq for qp_aff_irq_handler:" | |
905 | "ist=%X", port->qp_eq->attr.ist1); | |
906 | goto out_free_qpeq; | |
907 | } | |
908 | ||
909 | if (netif_msg_ifup(port)) | |
910 | ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " | |
911 | "registered", port->qp_eq->attr.ist1); | |
912 | ||
913 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | |
914 | pr = &port->port_res[i]; | |
915 | snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, | |
916 | "%s-send%d", dev->name, i); | |
917 | ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1, | |
918 | ehea_send_irq_handler, | |
919 | SA_INTERRUPT, pr->int_send_name, | |
920 | pr); | |
921 | if (ret) { | |
922 | ehea_error("failed registering irq for ehea_send " | |
923 | "port_res_nr:%d, ist=%X", i, | |
924 | pr->send_eq->attr.ist1); | |
925 | goto out_free_req; | |
926 | } | |
927 | if (netif_msg_ifup(port)) | |
928 | ehea_info("irq_handle 0x%X for function ehea_send_int " | |
929 | "%d registered", pr->send_eq->attr.ist1, i); | |
930 | } | |
931 | out: | |
932 | return ret; | |
933 | ||
934 | out_free_req: | |
935 | while (--i >= 0) { | |
936 | u32 ist = port->port_res[i].send_eq->attr.ist1; | |
937 | ibmebus_free_irq(NULL, ist, &port->port_res[i]); | |
938 | } | |
939 | out_free_qpeq: | |
940 | ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); | |
941 | i = port->num_def_qps; | |
942 | out_free_seq: | |
943 | while (--i >= 0) { | |
944 | u32 ist = port->port_res[i].recv_eq->attr.ist1; | |
945 | ibmebus_free_irq(NULL, ist, &port->port_res[i]); | |
946 | } | |
947 | goto out; | |
948 | } | |
949 | ||
950 | static void ehea_free_interrupts(struct net_device *dev) | |
951 | { | |
952 | struct ehea_port *port = netdev_priv(dev); | |
953 | struct ehea_port_res *pr; | |
954 | int i; | |
955 | ||
956 | /* send */ | |
957 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | |
958 | pr = &port->port_res[i]; | |
959 | ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr); | |
960 | if (netif_msg_intr(port)) | |
961 | ehea_info("free send irq for res %d with handle 0x%X", | |
962 | i, pr->send_eq->attr.ist1); | |
963 | } | |
964 | ||
965 | /* receive */ | |
966 | for (i = 0; i < port->num_def_qps; i++) { | |
967 | pr = &port->port_res[i]; | |
968 | ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr); | |
969 | if (netif_msg_intr(port)) | |
970 | ehea_info("free recv irq for res %d with handle 0x%X", | |
971 | i, pr->recv_eq->attr.ist1); | |
972 | } | |
973 | ||
974 | /* associated events */ | |
975 | ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); | |
976 | if (netif_msg_intr(port)) | |
977 | ehea_info("associated event interrupt for handle 0x%X freed", | |
978 | port->qp_eq->attr.ist1); | |
979 | } | |
980 | ||
981 | static int ehea_configure_port(struct ehea_port *port) | |
982 | { | |
983 | int ret, i; | |
984 | u64 hret, mask; | |
985 | struct hcp_ehea_port_cb0 *cb0; | |
986 | ||
987 | ret = -ENOMEM; | |
a1d261c5 | 988 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
989 | if (!cb0) |
990 | goto out; | |
991 | ||
992 | cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1) | |
993 | | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1) | |
994 | | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1) | |
995 | | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1) | |
996 | | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER, | |
997 | PXLY_RC_VLAN_FILTER) | |
998 | | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1); | |
999 | ||
1000 | for (i = 0; i < port->num_def_qps; i++) | |
602e0d10 | 1001 | cb0->default_qpn_arr[i] = port->port_res[0].qp->init_attr.qp_nr; |
7a291083 JBT |
1002 | |
1003 | if (netif_msg_ifup(port)) | |
1004 | ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port"); | |
1005 | ||
1006 | mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1) | |
1007 | | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1); | |
1008 | ||
1009 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | |
1010 | port->logical_port_id, | |
1011 | H_PORT_CB0, mask, cb0); | |
1012 | ret = -EIO; | |
1013 | if (hret != H_SUCCESS) | |
1014 | goto out_free; | |
1015 | ||
1016 | ret = 0; | |
1017 | ||
1018 | out_free: | |
1019 | kfree(cb0); | |
1020 | out: | |
1021 | return ret; | |
1022 | } | |
1023 | ||
1024 | static int ehea_gen_smrs(struct ehea_port_res *pr) | |
1025 | { | |
1026 | u64 hret; | |
1027 | struct ehea_adapter *adapter = pr->port->adapter; | |
1028 | ||
1029 | hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, | |
1030 | adapter->mr.vaddr, EHEA_MR_ACC_CTRL, | |
1031 | adapter->pd, &pr->send_mr); | |
1032 | if (hret != H_SUCCESS) | |
1033 | goto out; | |
1034 | ||
1035 | hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle, | |
1036 | adapter->mr.vaddr, EHEA_MR_ACC_CTRL, | |
1037 | adapter->pd, &pr->recv_mr); | |
1038 | if (hret != H_SUCCESS) | |
1039 | goto out_freeres; | |
1040 | ||
1041 | return 0; | |
1042 | ||
1043 | out_freeres: | |
1044 | hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); | |
1045 | if (hret != H_SUCCESS) | |
1046 | ehea_error("failed freeing SMR"); | |
1047 | out: | |
1048 | return -EIO; | |
1049 | } | |
1050 | ||
1051 | static int ehea_rem_smrs(struct ehea_port_res *pr) | |
1052 | { | |
1053 | struct ehea_adapter *adapter = pr->port->adapter; | |
1054 | int ret = 0; | |
1055 | u64 hret; | |
1056 | ||
1057 | hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle); | |
1058 | if (hret != H_SUCCESS) { | |
1059 | ret = -EIO; | |
1060 | ehea_error("failed freeing send SMR for pr=%p", pr); | |
1061 | } | |
1062 | ||
1063 | hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle); | |
1064 | if (hret != H_SUCCESS) { | |
1065 | ret = -EIO; | |
1066 | ehea_error("failed freeing recv SMR for pr=%p", pr); | |
1067 | } | |
1068 | ||
1069 | return ret; | |
1070 | } | |
1071 | ||
1072 | static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) | |
1073 | { | |
1074 | int arr_size = sizeof(void*) * max_q_entries; | |
1075 | ||
1076 | q_skba->arr = vmalloc(arr_size); | |
1077 | if (!q_skba->arr) | |
1078 | return -ENOMEM; | |
1079 | ||
1080 | memset(q_skba->arr, 0, arr_size); | |
1081 | ||
1082 | q_skba->len = max_q_entries; | |
1083 | q_skba->index = 0; | |
1084 | q_skba->os_skbs = 0; | |
1085 | ||
1086 | return 0; | |
1087 | } | |
1088 | ||
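/*
 * Set up one port resource: create the receive/send EQs and CQs, the QP with
 * its three receive queues, the skb bookkeeping arrays and the shared memory
 * regions whose l_keys are used in the send and receive WQEs.
 */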
1089 | static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, | |
1090 | struct port_res_cfg *pr_cfg, int queue_token) | |
1091 | { | |
1092 | struct ehea_adapter *adapter = port->adapter; | |
1093 | enum ehea_eq_type eq_type = EHEA_EQ; | |
1094 | struct ehea_qp_init_attr *init_attr = NULL; | |
1095 | int ret = -EIO; | |
1096 | ||
1097 | memset(pr, 0, sizeof(struct ehea_port_res)); | |
1098 | ||
1099 | pr->port = port; | |
1100 | spin_lock_init(&pr->send_lock); | |
1101 | spin_lock_init(&pr->recv_lock); | |
1102 | spin_lock_init(&pr->xmit_lock); | |
1103 | spin_lock_init(&pr->netif_queue); | |
1104 | ||
1105 | pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); | |
1106 | if (!pr->recv_eq) { | |
1107 | ehea_error("create_eq failed (recv_eq)"); | |
1108 | goto out_free; | |
1109 | } | |
1110 | ||
1111 | pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); | |
1112 | if (!pr->send_eq) { | |
1113 | ehea_error("create_eq failed (send_eq)"); | |
1114 | goto out_free; | |
1115 | } | |
1116 | ||
1117 | pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, | |
1118 | pr->recv_eq->fw_handle, | |
1119 | port->logical_port_id); | |
1120 | if (!pr->recv_cq) { | |
1121 | ehea_error("create_cq failed (cq_recv)"); | |
1122 | goto out_free; | |
1123 | } | |
1124 | ||
1125 | pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, | |
1126 | pr->send_eq->fw_handle, | |
1127 | port->logical_port_id); | |
1128 | if (!pr->send_cq) { | |
1129 | ehea_error("create_cq failed (cq_send)"); | |
1130 | goto out_free; | |
1131 | } | |
1132 | ||
1133 | if (netif_msg_ifup(port)) | |
1134 | ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d", | |
1135 | pr->send_cq->attr.act_nr_of_cqes, | |
1136 | pr->recv_cq->attr.act_nr_of_cqes); | |
1137 | ||
1138 | init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); | |
1139 | if (!init_attr) { | |
1140 | ret = -ENOMEM; | |
1141 | ehea_error("no mem for ehea_qp_init_attr"); | |
1142 | goto out_free; | |
1143 | } | |
1144 | ||
1145 | init_attr->low_lat_rq1 = 1; | |
1146 | init_attr->signalingtype = 1; /* generate CQE if specified in WQE */ | |
1147 | init_attr->rq_count = 3; | |
1148 | init_attr->qp_token = queue_token; | |
1149 | init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq; | |
1150 | init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1; | |
1151 | init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2; | |
1152 | init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3; | |
1153 | init_attr->wqe_size_enc_sq = EHEA_SG_SQ; | |
1154 | init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1; | |
1155 | init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2; | |
1156 | init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3; | |
1157 | init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD; | |
1158 | init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD; | |
1159 | init_attr->port_nr = port->logical_port_id; | |
1160 | init_attr->send_cq_handle = pr->send_cq->fw_handle; | |
1161 | init_attr->recv_cq_handle = pr->recv_cq->fw_handle; | |
1162 | init_attr->aff_eq_handle = port->qp_eq->fw_handle; | |
1163 | ||
1164 | pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); | |
1165 | if (!pr->qp) { | |
1166 | ehea_error("create_qp failed"); | |
1167 | ret = -EIO; | |
1168 | goto out_free; | |
1169 | } | |
1170 | ||
1171 | if (netif_msg_ifup(port)) | |
1172 | ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n " | |
1173 | "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr, | |
1174 | init_attr->act_nr_send_wqes, | |
1175 | init_attr->act_nr_rwqes_rq1, | |
1176 | init_attr->act_nr_rwqes_rq2, | |
1177 | init_attr->act_nr_rwqes_rq3); | |
1178 | ||
1179 | ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1); | |
1180 | ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); | |
1181 | ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); | |
1182 | ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); | |
1183 | if (ret) | |
1184 | goto out_free; | |
1185 | ||
1186 | pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; | |
1187 | if (ehea_gen_smrs(pr) != 0) { | |
1188 | ret = -EIO; | |
1189 | goto out_free; | |
1190 | } | |
1191 | tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet, | |
1192 | (unsigned long)pr); | |
1193 | atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); | |
1194 | ||
1195 | kfree(init_attr); | |
1196 | ret = 0; | |
1197 | goto out; | |
1198 | ||
1199 | out_free: | |
1200 | kfree(init_attr); | |
1201 | vfree(pr->sq_skba.arr); | |
1202 | vfree(pr->rq1_skba.arr); | |
1203 | vfree(pr->rq2_skba.arr); | |
1204 | vfree(pr->rq3_skba.arr); | |
1205 | ehea_destroy_qp(pr->qp); | |
1206 | ehea_destroy_cq(pr->send_cq); | |
1207 | ehea_destroy_cq(pr->recv_cq); | |
1208 | ehea_destroy_eq(pr->send_eq); | |
1209 | ehea_destroy_eq(pr->recv_eq); | |
1210 | out: | |
1211 | return ret; | |
1212 | } | |
1213 | ||
1214 | static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) | |
1215 | { | |
1216 | int ret, i; | |
1217 | ||
1218 | ret = ehea_destroy_qp(pr->qp); | |
1219 | ||
1220 | if (!ret) { | |
1221 | ehea_destroy_cq(pr->send_cq); | |
1222 | ehea_destroy_cq(pr->recv_cq); | |
1223 | ehea_destroy_eq(pr->send_eq); | |
1224 | ehea_destroy_eq(pr->recv_eq); | |
1225 | ||
1226 | for (i = 0; i < pr->rq1_skba.len; i++) | |
1227 | if (pr->rq1_skba.arr[i]) | |
1228 | dev_kfree_skb(pr->rq1_skba.arr[i]); | |
1229 | ||
1230 | for (i = 0; i < pr->rq2_skba.len; i++) | |
1231 | if (pr->rq2_skba.arr[i]) | |
1232 | dev_kfree_skb(pr->rq2_skba.arr[i]); | |
1233 | ||
1234 | for (i = 0; i < pr->rq3_skba.len; i++) | |
1235 | if (pr->rq3_skba.arr[i]) | |
1236 | dev_kfree_skb(pr->rq3_skba.arr[i]); | |
1237 | ||
1238 | for (i = 0; i < pr->sq_skba.len; i++) | |
1239 | if (pr->sq_skba.arr[i]) | |
1240 | dev_kfree_skb(pr->sq_skba.arr[i]); | |
1241 | ||
1242 | vfree(pr->rq1_skba.arr); | |
1243 | vfree(pr->rq2_skba.arr); | |
1244 | vfree(pr->rq3_skba.arr); | |
1245 | vfree(pr->sq_skba.arr); | |
1246 | ret = ehea_rem_smrs(pr); | |
1247 | } | |
1248 | return ret; | |
1249 | } | |
1250 | ||
1251 | /* | |
1252 | * The write_* functions store information in swqe which is used by | |
1253 | * the hardware to calculate the ip/tcp/udp checksum | |
1254 | */ | |
1255 | ||
1256 | static inline void write_ip_start_end(struct ehea_swqe *swqe, | |
1257 | const struct sk_buff *skb) | |
1258 | { | |
1259 | swqe->ip_start = (u8)(((u64)skb->nh.iph) - ((u64)skb->data)); | |
1260 | swqe->ip_end = (u8)(swqe->ip_start + skb->nh.iph->ihl * 4 - 1); | |
1261 | } | |
1262 | ||
1263 | static inline void write_tcp_offset_end(struct ehea_swqe *swqe, | |
1264 | const struct sk_buff *skb) | |
1265 | { | |
1266 | swqe->tcp_offset = | |
1267 | (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check)); | |
1268 | ||
1269 | swqe->tcp_end = (u16)skb->len - 1; | |
1270 | } | |
1271 | ||
1272 | static inline void write_udp_offset_end(struct ehea_swqe *swqe, | |
1273 | const struct sk_buff *skb) | |
1274 | { | |
1275 | swqe->tcp_offset = | |
1276 | (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check)); | |
1277 | ||
1278 | swqe->tcp_end = (u16)skb->len - 1; | |
1279 | } | |
1280 | ||
1281 | ||
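/*
 * TSO case for a type-2 send WQE: only the eth/ip/tcp headers go into the
 * immediate data area, the remaining linear skb data is referenced through
 * sg1entry, and the MSS is taken from gso_size.
 */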
1282 | static void write_swqe2_TSO(struct sk_buff *skb, | |
1283 | struct ehea_swqe *swqe, u32 lkey) | |
1284 | { | |
1285 | struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; | |
1286 | u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; | |
1287 | int skb_data_size = skb->len - skb->data_len; | |
1288 | int headersize; | |
1289 | u64 tmp_addr; | |
1290 | ||
1291 | /* Packet is TCP with TSO enabled */ | |
1292 | swqe->tx_control |= EHEA_SWQE_TSO; | |
1293 | swqe->mss = skb_shinfo(skb)->gso_size; | |
1294 | /* copy only eth/ip/tcp headers to immediate data and | |
1295 | * the rest of skb->data to sg1entry | |
1296 | */ | |
1297 | headersize = ETH_HLEN + (skb->nh.iph->ihl * 4) + (skb->h.th->doff * 4); | |
1298 | ||
1299 | skb_data_size = skb->len - skb->data_len; | |
1300 | ||
1301 | if (skb_data_size >= headersize) { | |
1302 | /* copy immediate data */ | |
1303 | memcpy(imm_data, skb->data, headersize); | |
1304 | swqe->immediate_data_length = headersize; | |
1305 | ||
1306 | if (skb_data_size > headersize) { | |
1307 | /* set sg1entry data */ | |
1308 | sg1entry->l_key = lkey; | |
1309 | sg1entry->len = skb_data_size - headersize; | |
1310 | ||
1311 | tmp_addr = (u64)(skb->data + headersize); | |
1312 | sg1entry->vaddr = tmp_addr; | |
1313 | swqe->descriptors++; | |
1314 | } | |
1315 | } else | |
1316 | ehea_error("cannot handle fragmented headers"); | |
1317 | } | |
1318 | ||
1319 | static void write_swqe2_nonTSO(struct sk_buff *skb, | |
1320 | struct ehea_swqe *swqe, u32 lkey) | |
1321 | { | |
1322 | int skb_data_size = skb->len - skb->data_len; | |
1323 | u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; | |
1324 | struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; | |
1325 | u64 tmp_addr; | |
1326 | ||
1327 | /* Packet is any nonTSO type | |
1328 | * | |
1329 | * Copy as much as possible skb->data to immediate data and | |
1330 | * the rest to sg1entry | |
1331 | */ | |
1332 | if (skb_data_size >= SWQE2_MAX_IMM) { | |
1333 | /* copy immediate data */ | |
1334 | memcpy(imm_data, skb->data, SWQE2_MAX_IMM); | |
1335 | ||
1336 | swqe->immediate_data_length = SWQE2_MAX_IMM; | |
1337 | ||
1338 | if (skb_data_size > SWQE2_MAX_IMM) { | |
1339 | /* copy sg1entry data */ | |
1340 | sg1entry->l_key = lkey; | |
1341 | sg1entry->len = skb_data_size - SWQE2_MAX_IMM; | |
1342 | tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM); | |
1343 | sg1entry->vaddr = tmp_addr; | |
1344 | swqe->descriptors++; | |
1345 | } | |
1346 | } else { | |
1347 | memcpy(imm_data, skb->data, skb_data_size); | |
1348 | swqe->immediate_data_length = skb_data_size; | |
1349 | } | |
1350 | } | |
1351 | ||
1352 | static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, | |
1353 | struct ehea_swqe *swqe, u32 lkey) | |
1354 | { | |
1355 | struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; | |
1356 | skb_frag_t *frag; | |
1357 | int nfrags, sg1entry_contains_frag_data, i; | |
1358 | u64 tmp_addr; | |
1359 | ||
1360 | nfrags = skb_shinfo(skb)->nr_frags; | |
1361 | sg1entry = &swqe->u.immdata_desc.sg_entry; | |
1362 | sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list; | |
1363 | swqe->descriptors = 0; | |
1364 | sg1entry_contains_frag_data = 0; | |
1365 | ||
1366 | if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size) | |
1367 | write_swqe2_TSO(skb, swqe, lkey); | |
1368 | else | |
1369 | write_swqe2_nonTSO(skb, swqe, lkey); | |
1370 | ||
1371 | /* write descriptors */ | |
1372 | if (nfrags > 0) { | |
1373 | if (swqe->descriptors == 0) { | |
1374 | /* sg1entry not yet used */ | |
1375 | frag = &skb_shinfo(skb)->frags[0]; | |
1376 | ||
1377 | /* copy sg1entry data */ | |
1378 | sg1entry->l_key = lkey; | |
1379 | sg1entry->len = frag->size; | |
1380 | tmp_addr = (u64)(page_address(frag->page) | |
1381 | + frag->page_offset); | |
1382 | sg1entry->vaddr = tmp_addr; | |
1383 | swqe->descriptors++; | |
1384 | sg1entry_contains_frag_data = 1; | |
1385 | } | |
1386 | ||
1387 | for (i = sg1entry_contains_frag_data; i < nfrags; i++) { | |
1388 | ||
1389 | frag = &skb_shinfo(skb)->frags[i]; | |
1390 | sgentry = &sg_list[i - sg1entry_contains_frag_data]; | |
1391 | ||
1392 | sgentry->l_key = lkey; | |
1393 | sgentry->len = frag->size; | |
1394 | ||
1395 | tmp_addr = (u64)(page_address(frag->page) | |
1396 | + frag->page_offset); | |
1397 | sgentry->vaddr = tmp_addr; | |
1398 | swqe->descriptors++; | |
1399 | } | |
1400 | } | |
1401 | } | |
1402 | ||
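/*
 * Register or deregister (depending on hcallid) the port's broadcast address
 * with the hypervisor, once for untagged and once for all-VLAN traffic.
 */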
1403 | static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) | |
1404 | { | |
1405 | int ret = 0; | |
1406 | u64 hret; | |
1407 | u8 reg_type; | |
1408 | ||
1409 | /* De/Register untagged packets */ | |
1410 | reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED; | |
1411 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | |
1412 | port->logical_port_id, | |
1413 | reg_type, port->mac_addr, 0, hcallid); | |
1414 | if (hret != H_SUCCESS) { | |
1415 | ehea_error("reg_dereg_bcmc failed (tagged)"); | |
1416 | ret = -EIO; | |
1417 | goto out_herr; | |
1418 | } | |
1419 | ||
1420 | /* De/Register VLAN packets */ | |
1421 | reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL; | |
1422 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | |
1423 | port->logical_port_id, | |
1424 | reg_type, port->mac_addr, 0, hcallid); | |
1425 | if (hret != H_SUCCESS) { | |
1426 | ehea_error("reg_dereg_bcmc failed (vlan)"); | |
1427 | ret = -EIO; | |
1428 | } | |
1429 | out_herr: | |
1430 | return ret; | |
1431 | } | |
1432 | ||
1433 | static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |
1434 | { | |
1435 | struct ehea_port *port = netdev_priv(dev); | |
1436 | struct sockaddr *mac_addr = sa; | |
1437 | struct hcp_ehea_port_cb0 *cb0; | |
1438 | int ret; | |
1439 | u64 hret; | |
1440 | ||
1441 | if (!is_valid_ether_addr(mac_addr->sa_data)) { | |
1442 | ret = -EADDRNOTAVAIL; | |
1443 | goto out; | |
1444 | } | |
1445 | ||
a1d261c5 | 1446 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1447 | if (!cb0) { |
1448 | ehea_error("no mem for cb0"); | |
1449 | ret = -ENOMEM; | |
1450 | goto out; | |
1451 | } | |
1452 | ||
1453 | memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN); | |
1454 | ||
1455 | cb0->port_mac_addr = cb0->port_mac_addr >> 16; | |
1456 | ||
1457 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | |
1458 | port->logical_port_id, H_PORT_CB0, | |
1459 | EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0); | |
1460 | if (hret != H_SUCCESS) { | |
1461 | ret = -EIO; | |
1462 | goto out_free; | |
1463 | } | |
1464 | ||
1465 | memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); | |
1466 | ||
1467 | /* Deregister old MAC in pHYP */ | |
1468 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | |
1469 | if (ret) | |
1470 | goto out_free; | |
1471 | ||
1472 | port->mac_addr = cb0->port_mac_addr << 16; | |
1473 | ||
1474 | /* Register new MAC in pHYP */ | |
1475 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | |
1476 | if (ret) | |
1477 | goto out_free; | |
1478 | ||
1479 | ret = 0; | |
1480 | out_free: | |
1481 | kfree(cb0); | |
1482 | out: | |
1483 | return ret; | |
1484 | } | |
1485 | ||
1486 | static void ehea_promiscuous_error(u64 hret, int enable) | |
1487 | { | |
7674a588 TK |
1488 | if (hret == H_AUTHORITY) |
1489 | ehea_info("Hypervisor denied %sabling promiscuous mode", | |
1490 | enable == 1 ? "en" : "dis"); | |
1491 | else | |
1492 | ehea_error("failed %sabling promiscuous mode", | |
1493 | enable == 1 ? "en" : "dis"); | |
7a291083 JBT |
1494 | } |
1495 | ||
1496 | static void ehea_promiscuous(struct net_device *dev, int enable) | |
1497 | { | |
1498 | struct ehea_port *port = netdev_priv(dev); | |
1499 | struct hcp_ehea_port_cb7 *cb7; | |
1500 | u64 hret; | |
1501 | ||
1502 | if ((enable && port->promisc) || (!enable && !port->promisc)) | |
1503 | return; | |
1504 | ||
a1d261c5 | 1505 | cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC); |
7a291083 JBT |
1506 | if (!cb7) { |
1507 | ehea_error("no mem for cb7"); | |
1508 | goto out; | |
1509 | } | |
1510 | ||
1511 | /* Modify Pxs_DUCQPN in CB7 */ | |
1512 | cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; | |
1513 | ||
1514 | hret = ehea_h_modify_ehea_port(port->adapter->handle, | |
1515 | port->logical_port_id, | |
1516 | H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7); | |
1517 | if (hret) { | |
1518 | ehea_promiscuous_error(hret, enable); | |
1519 | goto out; | |
1520 | } | |
1521 | ||
1522 | port->promisc = enable; | |
1523 | out: | |
1524 | kfree(cb7); | |
1525 | return; | |
1526 | } | |
1527 | ||
1528 | static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, | |
1529 | u32 hcallid) | |
1530 | { | |
1531 | u64 hret; | |
1532 | u8 reg_type; | |
1533 | ||
1534 | reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST | |
1535 | | EHEA_BCMC_UNTAGGED; | |
1536 | ||
1537 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | |
1538 | port->logical_port_id, | |
1539 | reg_type, mc_mac_addr, 0, hcallid); | |
1540 | if (hret) | |
1541 | goto out; | |
1542 | ||
1543 | reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST | |
1544 | | EHEA_BCMC_VLANID_ALL; | |
1545 | ||
1546 | hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, | |
1547 | port->logical_port_id, | |
1548 | reg_type, mc_mac_addr, 0, hcallid); | |
1549 | out: | |
1550 | return hret; | |
1551 | } | |
1552 | ||
1553 | static int ehea_drop_multicast_list(struct net_device *dev) | |
1554 | { | |
1555 | struct ehea_port *port = netdev_priv(dev); | |
1556 | struct ehea_mc_list *mc_entry = port->mc_list; | |
1557 | struct list_head *pos; | |
1558 | struct list_head *temp; | |
1559 | int ret = 0; | |
1560 | u64 hret; | |
1561 | ||
1562 | list_for_each_safe(pos, temp, &(port->mc_list->list)) { | |
1563 | mc_entry = list_entry(pos, struct ehea_mc_list, list); | |
1564 | ||
1565 | hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, | |
1566 | H_DEREG_BCMC); | |
1567 | if (hret) { | |
1568 | ehea_error("failed deregistering mcast MAC"); | |
1569 | ret = -EIO; | |
1570 | } | |
1571 | ||
1572 | list_del(pos); | |
1573 | kfree(mc_entry); | |
1574 | } | |
1575 | return ret; | |
1576 | } | |
1577 | ||
1578 | static void ehea_allmulti(struct net_device *dev, int enable) | |
1579 | { | |
1580 | struct ehea_port *port = netdev_priv(dev); | |
1581 | u64 hret; | |
1582 | ||
1583 | if (!port->allmulti) { | |
1584 | if (enable) { | |
1585 | /* Enable ALLMULTI */ | |
1586 | ehea_drop_multicast_list(dev); | |
1587 | hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC); | |
1588 | if (!hret) | |
1589 | port->allmulti = 1; | |
1590 | else | |
1591 | ehea_error("failed enabling IFF_ALLMULTI"); | |
1592 | } | |
1593 | } else | |
1594 | if (!enable) { | |
1595 | /* Disable ALLMULTI */ | |
1596 | hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); | |
1597 | if (!hret) | |
1598 | port->allmulti = 0; | |
1599 | else | |
1600 | ehea_error("failed disabling IFF_ALLMULTI"); | |
1601 | } | |
1602 | } | |
1603 | ||
1604 | static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr) | |
1605 | { | |
1606 | struct ehea_mc_list *ehea_mcl_entry; | |
1607 | u64 hret; | |
1608 | ||
1e1675cc | 1609 | ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); |
7a291083 JBT |
1610 | if (!ehea_mcl_entry) { |
1611 | ehea_error("no mem for mcl_entry"); | |
1612 | return; | |
1613 | } | |
1614 | ||
1615 | INIT_LIST_HEAD(&ehea_mcl_entry->list); | |
1616 | ||
1617 | memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); | |
1618 | ||
1619 | hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, | |
1620 | H_REG_BCMC); | |
1621 | if (!hret) | |
1622 | list_add(&ehea_mcl_entry->list, &port->mc_list->list); | |
1623 | else { | |
1624 | ehea_error("failed registering mcast MAC"); | |
1625 | kfree(ehea_mcl_entry); | |
1626 | } | |
1627 | } | |
1628 | ||
1629 | static void ehea_set_multicast_list(struct net_device *dev) | |
1630 | { | |
1631 | struct ehea_port *port = netdev_priv(dev); | |
1632 | struct dev_mc_list *k_mcl_entry; | |
1633 | int ret, i; | |
1634 | ||
1635 | if (dev->flags & IFF_PROMISC) { | |
1636 | ehea_promiscuous(dev, 1); | |
1637 | return; | |
1638 | } | |
1639 | ehea_promiscuous(dev, 0); | |
1640 | ||
1641 | if (dev->flags & IFF_ALLMULTI) { | |
1642 | ehea_allmulti(dev, 1); | |
1643 | return; | |
1644 | } | |
1645 | ehea_allmulti(dev, 0); | |
1646 | ||
1647 | if (dev->mc_count) { | |
1648 | ret = ehea_drop_multicast_list(dev); | |
1649 | if (ret) { | |
1650 | /* Dropping the current multicast list failed. | |
1651 | * Enabling ALL_MULTI is the best we can do. | |
1652 | */ | |
1653 | ehea_allmulti(dev, 1); | |
1654 | } | |
1655 | ||
1656 | if (dev->mc_count > port->adapter->max_mc_mac) { | |
1657 | ehea_info("Mcast registration limit reached (0x%lx). " | |
1658 | "Use ALLMULTI!", | |
1659 | port->adapter->max_mc_mac); | |
1660 | goto out; | |
1661 | } | |
1662 | ||
1663 | for (i = 0, k_mcl_entry = dev->mc_list; | |
1664 | i < dev->mc_count; | |
1665 | i++, k_mcl_entry = k_mcl_entry->next) { | |
1666 | ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); | |
1667 | } | |
1668 | } | |
1669 | out: | |
1670 | return; | |
1671 | } | |
1672 | ||
1673 | static int ehea_change_mtu(struct net_device *dev, int new_mtu) | |
1674 | { | |
1675 | if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE)) | |
1676 | return -EINVAL; | |
1677 | dev->mtu = new_mtu; | |
1678 | return 0; | |
1679 | } | |
1680 | ||
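/*
 * Prepare a type-2 send WQE (immediate data plus descriptors): enable IP and
 * TCP/UDP checksum offload for IPv4, skipping the TCP/UDP checksum for IP
 * fragments, then hand the payload to write_swqe2_data().
 */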
1681 | static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, | |
1682 | struct ehea_swqe *swqe, u32 lkey) | |
1683 | { | |
1684 | if (skb->protocol == htons(ETH_P_IP)) { | |
1685 | /* IPv4 */ | |
1686 | swqe->tx_control |= EHEA_SWQE_CRC | |
1687 | | EHEA_SWQE_IP_CHECKSUM | |
1688 | | EHEA_SWQE_TCP_CHECKSUM | |
1689 | | EHEA_SWQE_IMM_DATA_PRESENT | |
1690 | | EHEA_SWQE_DESCRIPTORS_PRESENT; | |
1691 | ||
1692 | write_ip_start_end(swqe, skb); | |
1693 | ||
1694 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | |
1695 | if ((skb->nh.iph->frag_off & IP_MF) || | |
1696 | (skb->nh.iph->frag_off & IP_OFFSET)) | |
1697 | /* IP fragment, so don't change cs */ | |
1698 | swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; | |
1699 | else | |
1700 | write_udp_offset_end(swqe, skb); | |
1701 | ||
1702 | } else if (skb->nh.iph->protocol == IPPROTO_TCP) { | |
1703 | write_tcp_offset_end(swqe, skb); | |
1704 | } | |
1705 | ||
1706 | /* icmp (big data) and ip segmentation packets (all other ip | |
1707 | packets) do not require any special handling */ | |
1708 | ||
1709 | } else { | |
1710 | /* Other Ethernet Protocol */ | |
1711 | swqe->tx_control |= EHEA_SWQE_CRC | |
1712 | | EHEA_SWQE_IMM_DATA_PRESENT | |
1713 | | EHEA_SWQE_DESCRIPTORS_PRESENT; | |
1714 | } | |
1715 | ||
1716 | write_swqe2_data(skb, dev, swqe, lkey); | |
1717 | } | |
1718 | ||
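ehea_xmit2() requests hardware UDP checksumming only when the datagram is not an IP fragment: a non-first fragment carries no transport header, so its checksum must be left alone. The test looks at the MF flag and the 13-bit fragment offset in the IP header's frag_off field. A minimal user-space sketch of the same test; note that frag_off is stored in network byte order, so the sketch converts it first (on the big-endian POWER machines this driver targets the two orders coincide). Later kernels provide an ip_is_fragment() helper with the same meaning.

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IP_OFFSET_MASK 0x1FFF	/* 13-bit fragment offset */
#define IP_MF_FLAG     0x2000	/* "more fragments" flag */

/* True if the datagram is (part of) a fragment, i.e. the MF flag is set
 * or the fragment offset is non-zero. */
static bool ip_frag(uint16_t frag_off_net)
{
	uint16_t frag_off = ntohs(frag_off_net);

	return (frag_off & (IP_MF_FLAG | IP_OFFSET_MASK)) != 0;
}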
1719 | static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |
1720 | struct ehea_swqe *swqe) | |
1721 | { | |
1722 | int nfrags = skb_shinfo(skb)->nr_frags; | |
1723 | u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; | |
1724 | skb_frag_t *frag; | |
1725 | int i; | |
1726 | ||
1727 | if (skb->protocol == htons(ETH_P_IP)) { | |
1728 | /* IPv4 */ | |
1729 | write_ip_start_end(swqe, skb); | |
1730 | ||
1731 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | |
1732 | swqe->tx_control |= EHEA_SWQE_CRC | |
1733 | | EHEA_SWQE_IP_CHECKSUM | |
1734 | | EHEA_SWQE_TCP_CHECKSUM | |
1735 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1736 | ||
1737 | write_tcp_offset_end(swqe, skb); | |
1738 | ||
1739 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | |
1740 | if ((skb->nh.iph->frag_off & IP_MF) || | |
1741 | (skb->nh.iph->frag_off & IP_OFFSET)) | |
1742 | /* IP fragment, so don't change cs */ | |
1743 | swqe->tx_control |= EHEA_SWQE_CRC | |
1744 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1745 | else { | |
1746 | swqe->tx_control |= EHEA_SWQE_CRC | |
1747 | | EHEA_SWQE_IP_CHECKSUM | |
1748 | | EHEA_SWQE_TCP_CHECKSUM | |
1749 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1750 | ||
1751 | write_udp_offset_end(swqe, skb); | |
1752 | } | |
1753 | } else { | |
1754 | /* icmp (big data) and | |
1755 | ip segmentation packets (all other ip packets) */ | |
1756 | swqe->tx_control |= EHEA_SWQE_CRC | |
1757 | | EHEA_SWQE_IP_CHECKSUM | |
1758 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1759 | } | |
1760 | } else { | |
1761 | /* Other Ethernet Protocol */ | |
1762 | swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT; | |
1763 | } | |
1764 | /* copy (immediate) data */ | |
1765 | if (nfrags == 0) { | |
1766 | /* data is in a single piece */ | |
1767 | memcpy(imm_data, skb->data, skb->len); | |
1768 | } else { | |
1769 | /* first copy data from the skb->data buffer ... */ | |
1770 | memcpy(imm_data, skb->data, skb->len - skb->data_len); | |
1771 | imm_data += skb->len - skb->data_len; | |
1772 | ||
1773 | /* ... then copy data from the fragments */ | |
1774 | for (i = 0; i < nfrags; i++) { | |
1775 | frag = &skb_shinfo(skb)->frags[i]; | |
1776 | memcpy(imm_data, | |
1777 | page_address(frag->page) + frag->page_offset, | |
1778 | frag->size); | |
1779 | imm_data += frag->size; | |
1780 | } | |
1781 | } | |
1782 | swqe->immediate_data_length = skb->len; | |
1783 | dev_kfree_skb(skb); | |
1784 | } | |
1785 | ||
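For frames small enough to fit in the SWQE, ehea_xmit3() copies the whole packet into the immediate-data area: first the linear skb->data part, then each page fragment, so no separate descriptors are needed and the skb can be freed right away. A minimal user-space sketch of that gather copy; struct frag and gather_copy() are hypothetical, not driver types:

#include <stddef.h>
#include <string.h>

/* Hypothetical fragment descriptor: a pointer and a length. */
struct frag {
	const void *data;
	size_t len;
};

/* Copy a linear buffer followed by its fragments into one contiguous
 * destination, mirroring the immediate-data copy above.  Returns the
 * number of bytes written. */
static size_t gather_copy(void *dst, const void *linear, size_t linear_len,
			  const struct frag *frags, size_t nfrags)
{
	unsigned char *p = dst;

	memcpy(p, linear, linear_len);
	p += linear_len;

	for (size_t i = 0; i < nfrags; i++) {
		memcpy(p, frags[i].data, frags[i].len);
		p += frags[i].len;
	}
	return (size_t)(p - (unsigned char *)dst);
}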
1786 | static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1787 | { | |
1788 | struct ehea_port *port = netdev_priv(dev); | |
1789 | struct ehea_swqe *swqe; | |
1790 | unsigned long flags; | |
1791 | u32 lkey; | |
1792 | int swqe_index; | |
1793 | struct ehea_port_res *pr = &port->port_res[0]; | |
1794 | ||
1795 | spin_lock(&pr->xmit_lock); | |
1796 | ||
1797 | swqe = ehea_get_swqe(pr->qp, &swqe_index); | |
1798 | memset(swqe, 0, SWQE_HEADER_SIZE); | |
1799 | atomic_dec(&pr->swqe_avail); | |
1800 | ||
1801 | if (skb->len <= SWQE3_MAX_IMM) { | |
1802 | u32 sig_iv = port->sig_comp_iv; | |
1803 | u32 swqe_num = pr->swqe_id_counter; | |
1804 | ehea_xmit3(skb, dev, swqe); | |
1805 | swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) | |
1806 | | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num); | |
1807 | if (pr->swqe_ll_count >= (sig_iv - 1)) { | |
1808 | swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, | |
1809 | sig_iv); | |
1810 | swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; | |
1811 | pr->swqe_ll_count = 0; | |
1812 | } else | |
1813 | pr->swqe_ll_count += 1; | |
1814 | } else { | |
1815 | swqe->wr_id = | |
1816 | EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) | |
1817 | | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) | |
1818 | | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); | |
1819 | pr->sq_skba.arr[pr->sq_skba.index] = skb; | |
1820 | ||
1821 | pr->sq_skba.index++; | |
1822 | pr->sq_skba.index &= (pr->sq_skba.len - 1); | |
1823 | ||
1824 | lkey = pr->send_mr.lkey; | |
1825 | ehea_xmit2(skb, dev, swqe, lkey); | |
1826 | ||
1827 | if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) { | |
1828 | swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, | |
1829 | EHEA_SIG_IV_LONG); | |
1830 | swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; | |
1831 | pr->swqe_count = 0; | |
1832 | } else | |
1833 | pr->swqe_count += 1; | |
1834 | } | |
1835 | pr->swqe_id_counter += 1; | |
1836 | ||
1837 | if (port->vgrp && vlan_tx_tag_present(skb)) { | |
1838 | swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; | |
1839 | swqe->vlan_tag = vlan_tx_tag_get(skb); | |
1840 | } | |
1841 | ||
1842 | if (netif_msg_tx_queued(port)) { | |
1843 | ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); | |
bff0a55f | 1844 | ehea_dump(swqe, 512, "swqe"); |
7a291083 JBT |
1845 | } |
1846 | ||
1847 | ehea_post_swqe(pr->qp, swqe); | |
1848 | pr->tx_packets++; | |
1849 | ||
1850 | if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { | |
1851 | spin_lock_irqsave(&pr->netif_queue, flags); | |
1852 | if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { | |
1853 | netif_stop_queue(dev); | |
1854 | pr->queue_stopped = 1; | |
1855 | } | |
1856 | spin_unlock_irqrestore(&pr->netif_queue, flags); | |
1857 | } | |
1858 | dev->trans_start = jiffies; | |
1859 | spin_unlock(&pr->xmit_lock); | |
1860 | ||
1861 | return NETDEV_TX_OK; | |
1862 | } | |
1863 | ||
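Two details in ehea_start_xmit() are worth calling out: the skb bookkeeping index wraps with index &= (len - 1), which only works if the array length is a power of two, and a signalled completion is requested only for every sig_comp_iv-th (or EHEA_SIG_IV_LONG-th) work request so the adapter does not raise a completion per packet. A minimal sketch of both mechanisms; struct send_ring and its fields are hypothetical:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical send ring: len must be a power of two for the mask trick. */
struct send_ring {
	size_t index;
	size_t len;			/* power of two */
	unsigned int unsignalled;	/* WQEs posted since the last signalled one */
	unsigned int sig_interval;	/* request a completion every Nth WQE */
};

/* Advance the ring index with a mask instead of a modulo. */
static size_t ring_next(struct send_ring *r)
{
	size_t slot = r->index;

	r->index = (r->index + 1) & (r->len - 1);
	return slot;
}

/* Decide whether this work request should ask for a completion. */
static bool should_signal(struct send_ring *r)
{
	if (++r->unsignalled >= r->sig_interval) {
		r->unsignalled = 0;
		return true;
	}
	return false;
}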
1864 | static void ehea_vlan_rx_register(struct net_device *dev, | |
1865 | struct vlan_group *grp) | |
1866 | { | |
1867 | struct ehea_port *port = netdev_priv(dev); | |
1868 | struct ehea_adapter *adapter = port->adapter; | |
1869 | struct hcp_ehea_port_cb1 *cb1; | |
1870 | u64 hret; | |
1871 | ||
1872 | port->vgrp = grp; | |
1873 | ||
a1d261c5 | 1874 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1875 | if (!cb1) { |
1876 | ehea_error("no mem for cb1"); | |
1877 | goto out; | |
1878 | } | |
1879 | ||
1880 | if (grp) | |
1881 | memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); | |
1882 | else | |
1883 | memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter)); | |
1884 | ||
1885 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | |
1886 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1887 | if (hret != H_SUCCESS) | |
1888 | ehea_error("modify_ehea_port failed"); | |
1889 | ||
1890 | kfree(cb1); | |
1891 | out: | |
1892 | return; | |
1893 | } | |
1894 | ||
1895 | static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |
1896 | { | |
1897 | struct ehea_port *port = netdev_priv(dev); | |
1898 | struct ehea_adapter *adapter = port->adapter; | |
1899 | struct hcp_ehea_port_cb1 *cb1; | |
1900 | int index; | |
1901 | u64 hret; | |
1902 | ||
a1d261c5 | 1903 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1904 | if (!cb1) { |
1905 | ehea_error("no mem for cb1"); | |
1906 | goto out; | |
1907 | } | |
1908 | ||
1909 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | |
1910 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1911 | if (hret != H_SUCCESS) { | |
1912 | ehea_error("query_ehea_port failed"); | |
1913 | goto out; | |
1914 | } | |
1915 | ||
1916 | index = (vid / 64); | |
1917 | cb1->vlan_filter[index] |= ((u64)1 << (vid & 0x3F)); | |
1918 | ||
1919 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | |
1920 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1921 | if (hret != H_SUCCESS) | |
1922 | ehea_error("modify_ehea_port failed"); | |
1923 | out: | |
1924 | kfree(cb1); | |
1925 | return; | |
1926 | } | |
1927 | ||
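The CB1 VLAN filter is treated as one large bitmap: vid / 64 selects the u64 word and vid & 0x3F the bit inside it, and the shift has to be performed in 64-bit arithmetic (hence the (u64)1 above), because shifting the 32-bit constant 1 by 32 or more positions is undefined. A minimal user-space sketch of set/clear/test helpers over such a bitmap, assuming the usual 4096 VLAN IDs:

#include <stdbool.h>
#include <stdint.h>

#define VLAN_N_VID 4096

/* 4096-bit VLAN filter stored as 64-bit words. */
struct vlan_filter {
	uint64_t word[VLAN_N_VID / 64];
};

static void vlan_filter_set(struct vlan_filter *f, unsigned int vid)
{
	f->word[vid / 64] |= (uint64_t)1 << (vid & 0x3F);
}

static void vlan_filter_clear(struct vlan_filter *f, unsigned int vid)
{
	f->word[vid / 64] &= ~((uint64_t)1 << (vid & 0x3F));
}

static bool vlan_filter_test(const struct vlan_filter *f, unsigned int vid)
{
	return (f->word[vid / 64] >> (vid & 0x3F)) & 1;
}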
1928 | static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |
1929 | { | |
1930 | struct ehea_port *port = netdev_priv(dev); | |
1931 | struct ehea_adapter *adapter = port->adapter; | |
1932 | struct hcp_ehea_port_cb1 *cb1; | |
1933 | int index; | |
1934 | u64 hret; | |
1935 | ||
1936 | if (port->vgrp) | |
1937 | port->vgrp->vlan_devices[vid] = NULL; | |
1938 | ||
a1d261c5 | 1939 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1940 | if (!cb1) { |
1941 | ehea_error("no mem for cb1"); | |
1942 | goto out; | |
1943 | } | |
1944 | ||
1945 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | |
1946 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1947 | if (hret != H_SUCCESS) { | |
1948 | ehea_error("query_ehea_port failed"); | |
1949 | goto out; | |
1950 | } | |
1951 | ||
1952 | index = (vid / 64); | |
1953 | cb1->vlan_filter[index] &= ~((u64)1 << (vid & 0x3F)); | |
1954 | ||
1955 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | |
1956 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1957 | if (hret != H_SUCCESS) | |
1958 | ehea_error("modify_ehea_port failed"); | |
1959 | out: | |
1960 | kfree(cb1); | |
1961 | return; | |
1962 | } | |
1963 | ||
1964 | int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |
1965 | { | |
1966 | int ret = -EIO; | |
1967 | u64 hret; | |
1968 | u16 dummy16 = 0; | |
1969 | u64 dummy64 = 0; | |
1970 | struct hcp_modify_qp_cb0 *cb0; | |
1971 | ||
a1d261c5 | 1972 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1973 | if (!cb0) { |
1974 | ret = -ENOMEM; | |
1975 | goto out; | |
1976 | } | |
1977 | ||
1978 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
1979 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
1980 | if (hret != H_SUCCESS) { | |
1981 | ehea_error("query_ehea_qp failed (1)"); | |
1982 | goto out; | |
1983 | } | |
1984 | ||
1985 | cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; | |
1986 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
1987 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | |
1988 | &dummy64, &dummy64, &dummy16, &dummy16); | |
1989 | if (hret != H_SUCCESS) { | |
1990 | ehea_error("modify_ehea_qp failed (1)"); | |
1991 | goto out; | |
1992 | } | |
1993 | ||
1994 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
1995 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
1996 | if (hret != H_SUCCESS) { | |
1997 | ehea_error("query_ehea_qp failed (2)"); | |
1998 | goto out; | |
1999 | } | |
2000 | ||
2001 | cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; | |
2002 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2003 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | |
2004 | &dummy64, &dummy64, &dummy16, &dummy16); | |
2005 | if (hret != H_SUCCESS) { | |
2006 | ehea_error("modify_ehea_qp failed (2)"); | |
2007 | goto out; | |
2008 | } | |
2009 | ||
2010 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2011 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
2012 | if (hret != H_SUCCESS) { | |
2013 | ehea_error("query_ehea_qp failed (3)"); | |
2014 | goto out; | |
2015 | } | |
2016 | ||
2017 | cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; | |
2018 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2019 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | |
2020 | &dummy64, &dummy64, &dummy16, &dummy16); | |
2021 | if (hret != H_SUCCESS) { | |
2022 | ehea_error("modify_ehea_qp failed (3)"); | |
2023 | goto out; | |
2024 | } | |
2025 | ||
2026 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2027 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
2028 | if (hret != H_SUCCESS) { | |
2029 | ehea_error("query_ehea_qp failed (4)"); | |
2030 | goto out; | |
2031 | } | |
2032 | ||
2033 | ret = 0; | |
2034 | out: | |
2035 | kfree(cb0); | |
2036 | return ret; | |
2037 | } | |
2038 | ||
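ehea_activate_qp() repeats the same query/modify pair to walk the queue pair from reset to INITIALIZED, then ENABLED, then RDY2SND, re-reading the control block before each step. The repetition could equally be written as a loop over a table of target states; a minimal sketch of that shape, where enum qp_state, query_qp() and modify_qp() are hypothetical stand-ins for the H_QP_CR_* values and the hypervisor calls:

#include <stdio.h>

/* Hypothetical QP control states, loosely mirroring H_QP_CR_*. */
enum qp_state {
	QP_INITIALIZED,
	QP_ENABLED,
	QP_RDY2SND,
};

/* Hypothetical stand-ins for the query/modify hypervisor calls. */
static int query_qp(int qp)                   { (void)qp; return 0; }
static int modify_qp(int qp, enum qp_state s) { (void)qp; (void)s; return 0; }

/* Step the QP through each state in order, bailing out on the first error. */
static int activate_qp(int qp)
{
	static const enum qp_state steps[] = {
		QP_INITIALIZED, QP_ENABLED, QP_RDY2SND,
	};

	for (unsigned int i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		if (query_qp(qp)) {
			fprintf(stderr, "query failed before step %u\n", i);
			return -1;
		}
		if (modify_qp(qp, steps[i])) {
			fprintf(stderr, "modify failed at step %u\n", i);
			return -1;
		}
	}
	return query_qp(qp) ? -1 : 0;	/* final sanity query */
}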
2039 | static int ehea_port_res_setup(struct ehea_port *port, int def_qps, | |
2040 | int add_tx_qps) | |
2041 | { | |
2042 | int ret, i; | |
2043 | struct port_res_cfg pr_cfg, pr_cfg_small_rx; | |
2044 | enum ehea_eq_type eq_type = EHEA_EQ; | |
2045 | ||
2046 | port->qp_eq = ehea_create_eq(port->adapter, eq_type, | |
2047 | EHEA_MAX_ENTRIES_EQ, 1); | |
2048 | if (!port->qp_eq) { | |
2049 | ret = -EINVAL; | |
2050 | ehea_error("ehea_create_eq failed (qp_eq)"); | |
2051 | goto out_kill_eq; | |
2052 | } | |
2053 | ||
2054 | pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; | |
2055 | pr_cfg.max_entries_scq = sq_entries; | |
2056 | pr_cfg.max_entries_sq = sq_entries; | |
2057 | pr_cfg.max_entries_rq1 = rq1_entries; | |
2058 | pr_cfg.max_entries_rq2 = rq2_entries; | |
2059 | pr_cfg.max_entries_rq3 = rq3_entries; | |
2060 | ||
2061 | pr_cfg_small_rx.max_entries_rcq = 1; | |
2062 | pr_cfg_small_rx.max_entries_scq = sq_entries; | |
2063 | pr_cfg_small_rx.max_entries_sq = sq_entries; | |
2064 | pr_cfg_small_rx.max_entries_rq1 = 1; | |
2065 | pr_cfg_small_rx.max_entries_rq2 = 1; | |
2066 | pr_cfg_small_rx.max_entries_rq3 = 1; | |
2067 | ||
2068 | for (i = 0; i < def_qps; i++) { | |
2069 | ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); | |
2070 | if (ret) | |
2071 | goto out_clean_pr; | |
2072 | } | |
2073 | for (i = def_qps; i < def_qps + add_tx_qps; i++) { | |
2074 | ret = ehea_init_port_res(port, &port->port_res[i], | |
2075 | &pr_cfg_small_rx, i); | |
2076 | if (ret) | |
2077 | goto out_clean_pr; | |
2078 | } | |
2079 | ||
2080 | return 0; | |
2081 | ||
2082 | out_clean_pr: | |
2083 | while (--i >= 0) | |
2084 | ehea_clean_portres(port, &port->port_res[i]); | |
2085 | ||
2086 | out_kill_eq: | |
2087 | ehea_destroy_eq(port->qp_eq); | |
2088 | return ret; | |
2089 | } | |
2090 | ||
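ehea_port_res_setup() builds full-sized receive queues for the default QPs and one-entry receive queues for the additional TX-only QPs; on failure, the while (--i >= 0) loop tears down only the port resources that were actually initialized. A minimal sketch of that partial-rollback pattern; res_init() and res_cleanup() are hypothetical:

#include <stddef.h>

/* Hypothetical per-resource init/cleanup stand-ins. */
static int  res_init(size_t i)    { (void)i; return 0; }
static void res_cleanup(size_t i) { (void)i; }

/* Initialize n resources; on error, clean up only those that succeeded. */
static int setup_all(size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (res_init(i))
			goto unwind;
	}
	return 0;

unwind:
	while (i-- > 0)
		res_cleanup(i);
	return -1;
}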
2091 | static int ehea_clean_all_portres(struct ehea_port *port) | |
2092 | { | |
2093 | int ret = 0; | |
2094 | int i; | |
2095 | ||
2096 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) | |
2097 | ret |= ehea_clean_portres(port, &port->port_res[i]); | |
2098 | ||
2099 | ret |= ehea_destroy_eq(port->qp_eq); | |
2100 | ||
2101 | return ret; | |
2102 | } | |
2103 | ||
2104 | static int ehea_up(struct net_device *dev) | |
2105 | { | |
2106 | int ret, i; | |
2107 | struct ehea_port *port = netdev_priv(dev); | |
2108 | u64 mac_addr = 0; | |
2109 | ||
2110 | if (port->state == EHEA_PORT_UP) | |
2111 | return 0; | |
2112 | ||
2113 | ret = ehea_port_res_setup(port, port->num_def_qps, | |
2114 | port->num_add_tx_qps); | |
2115 | if (ret) { | |
2116 | ehea_error("port_res_setup failed"); | |
2117 | goto out; | |
2118 | } | |
2119 | ||
2120 | /* Set default QP for this port */ | |
2121 | ret = ehea_configure_port(port); | |
2122 | if (ret) { | |
2123 | ehea_error("ehea_configure_port failed. ret:%d", ret); | |
2124 | goto out_clean_pr; | |
2125 | } | |
2126 | ||
2127 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | |
2128 | if (ret) { | |
2129 | ret = -EIO; | |
2130 | ehea_error("broadcast registration failed"); | |
2131 | goto out_clean_pr; | |
2132 | } | |
2133 | mac_addr = (*(u64*)dev->dev_addr) >> 16; | |
2134 | ||
2135 | ret = ehea_reg_interrupts(dev); | |
2136 | if (ret) { | |
2137 | ehea_error("reg_interrupts failed"); | |
2138 | goto out_dereg_bc; | |
2139 | } | |
2140 | ||
2141 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | |
2142 | ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); | |
2143 | if (ret) { | |
2144 | ehea_error("activate_qp failed"); | |
2145 | goto out_free_irqs; | |
2146 | } | |
2147 | } | |
2148 | ||
2149 | for (i = 0; i < port->num_def_qps; i++) { | |
2150 | ret = ehea_fill_port_res(&port->port_res[i]); | |
2151 | if (ret) { | |
2152 | ehea_error("fill_port_res failed"); | |
2153 | goto out_free_irqs; | |
2154 | } | |
2155 | } | |
2156 | ||
2157 | ret = 0; | |
2158 | port->state = EHEA_PORT_UP; | |
2159 | goto out; | |
2160 | ||
2161 | out_free_irqs: | |
2162 | ehea_free_interrupts(dev); | |
2163 | ||
2164 | out_dereg_bc: | |
2165 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | |
2166 | ||
2167 | out_clean_pr: | |
2168 | ehea_clean_all_portres(port); | |
2169 | out: | |
2170 | return ret; | |
2171 | } | |
2172 | ||
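ehea_up() acquires its resources in a fixed order (port resources, port configuration, broadcast registration, interrupts, QP activation and RQ fill) and unwinds them in reverse through a ladder of goto labels, so each failure path releases exactly what was already set up. A minimal sketch of the idiom with hypothetical acquire_*()/release_*() pairs:

/* Hypothetical acquire/release pairs standing in for the steps in ehea_up(). */
static int  acquire_a(void) { return 0; }
static void release_a(void) { }
static int  acquire_b(void) { return 0; }
static void release_b(void) { }
static int  acquire_c(void) { return 0; }
static void release_c(void) { }

/* Each failure jumps to the label that releases everything acquired so
 * far, in reverse order of acquisition. */
static int bring_up(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;

	ret = acquire_b();
	if (ret)
		goto out_release_a;

	ret = acquire_c();
	if (ret)
		goto out_release_b;

	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
out:
	return ret;
}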
2173 | static int ehea_open(struct net_device *dev) | |
2174 | { | |
2175 | int ret; | |
2176 | struct ehea_port *port = netdev_priv(dev); | |
2177 | ||
2178 | down(&port->port_lock); | |
2179 | ||
2180 | if (netif_msg_ifup(port)) | |
2181 | ehea_info("enabling port %s", dev->name); | |
2182 | ||
2183 | ret = ehea_up(dev); | |
2184 | if (!ret) | |
2185 | netif_start_queue(dev); | |
2186 | ||
2187 | up(&port->port_lock); | |
2188 | ||
2189 | return ret; | |
2190 | } | |
2191 | ||
2192 | static int ehea_down(struct net_device *dev) | |
2193 | { | |
2194 | int ret, i; | |
2195 | struct ehea_port *port = netdev_priv(dev); | |
2196 | ||
2197 | if (port->state == EHEA_PORT_DOWN) | |
2198 | return 0; | |
2199 | ||
2200 | ehea_drop_multicast_list(dev); | |
2201 | ehea_free_interrupts(dev); | |
2202 | ||
2203 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) | |
2204 | tasklet_kill(&port->port_res[i].send_comp_task); | |
2205 | ||
2206 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | |
2207 | ret = ehea_clean_all_portres(port); | |
2208 | port->state = EHEA_PORT_DOWN; | |
2209 | return ret; | |
2210 | } | |
2211 | ||
2212 | static int ehea_stop(struct net_device *dev) | |
2213 | { | |
2214 | int ret; | |
2215 | struct ehea_port *port = netdev_priv(dev); | |
2216 | ||
2217 | if (netif_msg_ifdown(port)) | |
2218 | ehea_info("disabling port %s", dev->name); | |
2219 | ||
2220 | flush_workqueue(port->adapter->ehea_wq); | |
2221 | down(&port->port_lock); | |
2222 | netif_stop_queue(dev); | |
2223 | ret = ehea_down(dev); | |
2224 | up(&port->port_lock); | |
2225 | return ret; | |
2226 | } | |
2227 | ||
c4028958 | 2228 | static void ehea_reset_port(struct work_struct *work) |
7a291083 JBT |
2229 | { |
2230 | int ret; | |
c4028958 DH |
2231 | struct ehea_port *port = |
2232 | container_of(work, struct ehea_port, reset_task); | |
2233 | struct net_device *dev = port->netdev; | |
7a291083 JBT |
2234 | |
2235 | port->resets++; | |
2236 | down(&port->port_lock); | |
2237 | netif_stop_queue(dev); | |
2238 | netif_poll_disable(dev); | |
2239 | ||
2240 | ret = ehea_down(dev); | |
2241 | if (ret) | |
2242 | ehea_error("ehea_down failed. not all resources are freed"); | |
2243 | ||
2244 | ret = ehea_up(dev); | |
2245 | if (ret) { | |
2246 | ehea_error("Reset device %s failed: ret=%d", dev->name, ret); | |
2247 | goto out; | |
2248 | } | |
2249 | ||
2250 | if (netif_msg_timer(port)) | |
2251 | ehea_info("Device %s reset successfully", dev->name); | |
2252 | ||
2253 | netif_poll_enable(dev); | |
2254 | netif_wake_queue(dev); | |
2255 | out: | |
2256 | up(&port->port_lock); | |
2257 | return; | |
2258 | } | |
2259 | ||
2260 | static void ehea_tx_watchdog(struct net_device *dev) | |
2261 | { | |
2262 | struct ehea_port *port = netdev_priv(dev); | |
2263 | ||
2264 | if (netif_carrier_ok(dev)) | |
2265 | queue_work(port->adapter->ehea_wq, &port->reset_task); | |
2266 | } | |
2267 | ||
2268 | int ehea_sense_adapter_attr(struct ehea_adapter *adapter) | |
2269 | { | |
2270 | struct hcp_query_ehea *cb; | |
4e996b32 TK |
2271 | struct device_node *lhea_dn = NULL; |
2272 | struct device_node *eth_dn = NULL; | |
7a291083 JBT |
2273 | u64 hret; |
2274 | int ret; | |
2275 | ||
a1d261c5 | 2276 | cb = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
2277 | if (!cb) { |
2278 | ret = -ENOMEM; | |
2279 | goto out; | |
2280 | } | |
2281 | ||
2282 | hret = ehea_h_query_ehea(adapter->handle, cb); | |
2283 | ||
2284 | if (hret != H_SUCCESS) { | |
2285 | ret = -EIO; | |
2286 | goto out_herr; | |
2287 | } | |
2288 | ||
4e996b32 TK |
2289 | /* Determine the number of available logical ports |
2290 | * by counting the child nodes of the lhea OFDT entry | |
2291 | */ | |
2292 | adapter->num_ports = 0; | |
2293 | lhea_dn = of_find_node_by_name(lhea_dn, "lhea"); | |
2294 | do { | |
2295 | eth_dn = of_get_next_child(lhea_dn, eth_dn); | |
2296 | if (eth_dn) | |
2297 | adapter->num_ports++; | |
2298 | } while (eth_dn); | |
2299 | of_node_put(lhea_dn); | |
2300 | ||
7a291083 JBT |
2301 | adapter->max_mc_mac = cb->max_mc_mac - 1; |
2302 | ret = 0; | |
2303 | ||
2304 | out_herr: | |
2305 | kfree(cb); | |
2306 | out: | |
2307 | return ret; | |
2308 | } | |
2309 | ||
2310 | static int ehea_setup_single_port(struct ehea_port *port, | |
2311 | struct device_node *dn) | |
2312 | { | |
2313 | int ret; | |
2314 | u64 hret; | |
2315 | struct net_device *dev = port->netdev; | |
2316 | struct ehea_adapter *adapter = port->adapter; | |
2317 | struct hcp_ehea_port_cb4 *cb4; | |
2318 | u32 *dn_log_port_id; | |
2319 | ||
2320 | sema_init(&port->port_lock, 1); | |
2321 | port->state = EHEA_PORT_DOWN; | |
2322 | port->sig_comp_iv = sq_entries / 10; | |
2323 | ||
2324 | if (!dn) { | |
2325 | ehea_error("bad device node: dn=%p", dn); | |
2326 | ret = -EINVAL; | |
2327 | goto out; | |
2328 | } | |
2329 | ||
2330 | port->of_dev_node = dn; | |
2331 | ||
2332 | /* Determine logical port id */ | |
2333 | dn_log_port_id = (u32*)get_property(dn, "ibm,hea-port-no", NULL); | |
2334 | ||
2335 | if (!dn_log_port_id) { | |
2336 | ehea_error("bad device node: dn_log_port_id=%p", | |
2337 | dn_log_port_id); | |
2338 | ret = -EINVAL; | |
2339 | goto out; | |
2340 | } | |
2341 | port->logical_port_id = *dn_log_port_id; | |
2342 | ||
2343 | port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); | |
2344 | if (!port->mc_list) { | |
2345 | ret = -ENOMEM; | |
2346 | goto out; | |
2347 | } | |
2348 | ||
2349 | INIT_LIST_HEAD(&port->mc_list->list); | |
2350 | ||
7a291083 JBT |
2351 | ret = ehea_sense_port_attr(port); |
2352 | if (ret) | |
2353 | goto out; | |
2354 | ||
2355 | /* Enable Jumbo frames */ | |
a1d261c5 | 2356 | cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
2357 | if (!cb4) { |
2358 | ehea_error("no mem for cb4"); | |
2359 | } else { | |
2360 | cb4->jumbo_frame = 1; | |
2361 | hret = ehea_h_modify_ehea_port(adapter->handle, | |
2362 | port->logical_port_id, | |
2363 | H_PORT_CB4, H_PORT_CB4_JUMBO, | |
2364 | cb4); | |
2365 | if (hret != H_SUCCESS) { | |
2366 | ehea_info("Jumbo frames not activated"); | |
2367 | } | |
2368 | kfree(cb4); | |
2369 | } | |
2370 | ||
2371 | /* initialize net_device structure */ | |
2372 | SET_MODULE_OWNER(dev); | |
2373 | ||
2374 | memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); | |
2375 | ||
2376 | dev->open = ehea_open; | |
2377 | dev->poll = ehea_poll; | |
2378 | dev->weight = 64; | |
2379 | dev->stop = ehea_stop; | |
2380 | dev->hard_start_xmit = ehea_start_xmit; | |
2381 | dev->get_stats = ehea_get_stats; | |
2382 | dev->set_multicast_list = ehea_set_multicast_list; | |
2383 | dev->set_mac_address = ehea_set_mac_addr; | |
2384 | dev->change_mtu = ehea_change_mtu; | |
2385 | dev->vlan_rx_register = ehea_vlan_rx_register; | |
2386 | dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid; | |
2387 | dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid; | |
2388 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | |
2389 | | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | |
2390 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | |
2391 | | NETIF_F_LLTX; | |
2392 | dev->tx_timeout = &ehea_tx_watchdog; | |
2393 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | |
2394 | ||
c4028958 | 2395 | INIT_WORK(&port->reset_task, ehea_reset_port); |
7a291083 JBT |
2396 | |
2397 | ehea_set_ethtool_ops(dev); | |
2398 | ||
2399 | ret = register_netdev(dev); | |
2400 | if (ret) { | |
2401 | ehea_error("register_netdev failed. ret=%d", ret); | |
2402 | goto out_free; | |
2403 | } | |
2404 | ||
2405 | port->netdev = dev; | |
2406 | ret = 0; | |
2407 | goto out; | |
2408 | ||
2409 | out_free: | |
2410 | kfree(port->mc_list); | |
2411 | out: | |
2412 | return ret; | |
2413 | } | |
2414 | ||
2415 | static int ehea_setup_ports(struct ehea_adapter *adapter) | |
2416 | { | |
2417 | int ret; | |
2418 | int port_setup_ok = 0; | |
2419 | struct ehea_port *port; | |
2420 | struct device_node *dn = NULL; | |
2421 | struct net_device *dev; | |
2422 | int i; | |
2423 | ||
2424 | /* get port properties for all ports */ | |
2425 | for (i = 0; i < adapter->num_ports; i++) { | |
2426 | ||
2427 | if (adapter->port[i]) | |
2428 | continue; /* port already up and running */ | |
2429 | ||
2430 | /* allocate memory for the port structures */ | |
2431 | dev = alloc_etherdev(sizeof(struct ehea_port)); | |
2432 | ||
2433 | if (!dev) { | |
2434 | ehea_error("no mem for net_device"); | |
2435 | break; | |
2436 | } | |
2437 | ||
2438 | port = netdev_priv(dev); | |
2439 | port->adapter = adapter; | |
2440 | port->netdev = dev; | |
2441 | adapter->port[i] = port; | |
2442 | port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); | |
2443 | ||
2444 | dn = of_find_node_by_name(dn, "ethernet"); | |
2445 | ret = ehea_setup_single_port(port, dn); | |
2446 | if (ret) { | |
2447 | /* Free mem for this port struct. The others will be | |
2448 | processed on rollback */ | |
2449 | free_netdev(dev); | |
2450 | adapter->port[i] = NULL; | |
2451 | ehea_error("eHEA port %d setup failed, ret=%d", i, ret); | |
2452 | } | |
2453 | } | |
2454 | ||
2455 | of_node_put(dn); | |
2456 | ||
2457 | /* Check for successfully set up ports */ | |
2458 | for (i = 0; i < adapter->num_ports; i++) | |
2459 | if (adapter->port[i]) | |
2460 | port_setup_ok++; | |
2461 | ||
2462 | if (port_setup_ok) | |
2463 | ret = 0; /* At least some ports are set up correctly */ | |
2464 | else | |
2465 | ret = -EINVAL; | |
2466 | ||
2467 | return ret; | |
2468 | } | |
2469 | ||
2470 | static int __devinit ehea_probe(struct ibmebus_dev *dev, | |
2471 | const struct of_device_id *id) | |
2472 | { | |
2473 | struct ehea_adapter *adapter; | |
2474 | u64 *adapter_handle; | |
2475 | int ret; | |
2476 | ||
2477 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | |
2478 | if (!adapter) { | |
2479 | ret = -ENOMEM; | |
2480 | dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n"); | |
2481 | goto out; | |
2482 | } | |
2483 | ||
2484 | adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle", | |
2485 | NULL); | |
061bf3cd TK |
2486 | if (adapter_handle) |
2487 | adapter->handle = *adapter_handle; | |
2488 | ||
2489 | if (!adapter->handle) { | |
7a291083 JBT |
2490 | dev_err(&dev->ofdev.dev, "failed getting handle for adapter" |
2491 | " '%s'\n", dev->ofdev.node->full_name); | |
2492 | ret = -ENODEV; | |
2493 | goto out_free_ad; | |
2494 | } | |
2495 | ||
7a291083 JBT |
2496 | adapter->pd = EHEA_PD_ID; |
2497 | ||
2498 | dev->ofdev.dev.driver_data = adapter; | |
2499 | ||
2500 | ret = ehea_reg_mr_adapter(adapter); | |
2501 | if (ret) { | |
2502 | dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n"); | |
2503 | goto out_free_ad; | |
2504 | } | |
2505 | ||
2506 | /* initialize adapter and ports */ | |
2507 | /* get adapter properties */ | |
2508 | ret = ehea_sense_adapter_attr(adapter); | |
2509 | if (ret) { | |
2510 | dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret); | |
2511 | goto out_free_res; | |
2512 | } | |
2513 | dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports); | |
2514 | ||
2515 | adapter->neq = ehea_create_eq(adapter, | |
2516 | EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); | |
2517 | if (!adapter->neq) { | |
2518 | dev_err(&dev->ofdev.dev, "NEQ creation failed"); | |
2519 | goto out_free_res; | |
2520 | } | |
2521 | ||
2522 | tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, | |
2523 | (unsigned long)adapter); | |
2524 | ||
2525 | ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1, | |
2526 | ehea_interrupt_neq, SA_INTERRUPT, | |
2527 | "ehea_neq", adapter); | |
2528 | if (ret) { | |
2529 | dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed"); | |
2530 | goto out_kill_eq; | |
2531 | } | |
2532 | ||
2533 | adapter->ehea_wq = create_workqueue("ehea_wq"); | |
2534 | if (!adapter->ehea_wq) | |
2535 | goto out_free_irq; | |
2536 | ||
2537 | ret = ehea_setup_ports(adapter); | |
2538 | if (ret) { | |
2539 | dev_err(&dev->ofdev.dev, "setup_ports failed"); | |
2540 | goto out_kill_wq; | |
2541 | } | |
2542 | ||
2543 | ret = 0; | |
2544 | goto out; | |
2545 | ||
2546 | out_kill_wq: | |
2547 | destroy_workqueue(adapter->ehea_wq); | |
2548 | ||
2549 | out_free_irq: | |
2550 | ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); | |
2551 | ||
2552 | out_kill_eq: | |
2553 | ehea_destroy_eq(adapter->neq); | |
2554 | ||
2555 | out_free_res: | |
2556 | ehea_h_free_resource(adapter->handle, adapter->mr.handle); | |
2557 | ||
2558 | out_free_ad: | |
2559 | kfree(adapter); | |
2560 | out: | |
2561 | return ret; | |
2562 | } | |
2563 | ||
2564 | static void ehea_shutdown_single_port(struct ehea_port *port) | |
2565 | { | |
2566 | unregister_netdev(port->netdev); | |
2567 | kfree(port->mc_list); | |
2568 | free_netdev(port->netdev); | |
2569 | } | |
2570 | ||
2571 | static int __devexit ehea_remove(struct ibmebus_dev *dev) | |
2572 | { | |
2573 | struct ehea_adapter *adapter = dev->ofdev.dev.driver_data; | |
2574 | u64 hret; | |
2575 | int i; | |
2576 | ||
2577 | for (i = 0; i < adapter->num_ports; i++) | |
2578 | if (adapter->port[i]) { | |
2579 | ehea_shutdown_single_port(adapter->port[i]); | |
2580 | adapter->port[i] = NULL; | |
2581 | } | |
2582 | destroy_workqueue(adapter->ehea_wq); | |
2583 | ||
2584 | ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); | |
2585 | ||
2586 | ehea_destroy_eq(adapter->neq); | |
2587 | ||
2588 | hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle); | |
2589 | if (hret) { | |
2590 | dev_err(&dev->ofdev.dev, "free_resource_mr failed"); | |
2591 | return -EIO; | |
2592 | } | |
2593 | kfree(adapter); | |
2594 | return 0; | |
2595 | } | |
2596 | ||
2597 | static int check_module_parm(void) | |
2598 | { | |
2599 | int ret = 0; | |
2600 | ||
2601 | if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || | |
2602 | (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { | |
2603 | ehea_info("Bad parameter: rq1_entries"); | |
2604 | ret = -EINVAL; | |
2605 | } | |
2606 | if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || | |
2607 | (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { | |
2608 | ehea_info("Bad parameter: rq2_entries"); | |
2609 | ret = -EINVAL; | |
2610 | } | |
2611 | if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || | |
2612 | (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { | |
2613 | ehea_info("Bad parameter: rq3_entries"); | |
2614 | ret = -EINVAL; | |
2615 | } | |
2616 | if ((sq_entries < EHEA_MIN_ENTRIES_QP) || | |
2617 | (sq_entries > EHEA_MAX_ENTRIES_SQ)) { | |
2618 | ehea_info("Bad parameter: sq_entries"); | |
2619 | ret = -EINVAL; | |
2620 | } | |
2621 | ||
2622 | return ret; | |
2623 | } | |
2624 | ||
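check_module_parm() only range-checks the four entry counts against the EHEA_MIN/MAX constants. Assuming the counts are meant to be one less than a power of two (a common convention for queues whose indices wrap with a mask, as ehea_start_xmit() does with sq_skba.index), a stricter check could also verify that property; valid_queue_entries() below is a hypothetical sketch under that assumption, with the 64..16384 bounds chosen only for illustration:

#include <stdbool.h>

/* Hypothetical stricter validator: accept only values v where v + 1 is a
 * power of two inside an illustrative [64, 16384] window. */
static bool valid_queue_entries(unsigned int n)
{
	unsigned int v = n + 1;

	if (v < 64 || v > 16384)
		return false;
	return (v & (v - 1)) == 0;	/* power-of-two test */
}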
2625 | static struct of_device_id ehea_device_table[] = { | |
2626 | { | |
2627 | .name = "lhea", | |
2628 | .compatible = "IBM,lhea", | |
2629 | }, | |
2630 | {}, | |
2631 | }; | |
2632 | ||
2633 | static struct ibmebus_driver ehea_driver = { | |
2634 | .name = "ehea", | |
2635 | .id_table = ehea_device_table, | |
2636 | .probe = ehea_probe, | |
2637 | .remove = ehea_remove, | |
2638 | }; | |
2639 | ||
2640 | int __init ehea_module_init(void) | |
2641 | { | |
2642 | int ret; | |
2643 | ||
2644 | printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", | |
2645 | DRV_VERSION); | |
2646 | ||
2647 | ret = check_module_parm(); | |
2648 | if (ret) | |
2649 | goto out; | |
2650 | ret = ibmebus_register_driver(&ehea_driver); | |
2651 | if (ret) | |
2652 | ehea_error("failed registering eHEA device driver on ebus"); | |
2653 | ||
2654 | out: | |
2655 | return ret; | |
2656 | } | |
2657 | ||
2658 | static void __exit ehea_module_exit(void) | |
2659 | { | |
2660 | ibmebus_unregister_driver(&ehea_driver); | |
2661 | } | |
2662 | ||
2663 | module_init(ehea_module_init); | |
2664 | module_exit(ehea_module_exit); |