/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/

/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#include <linux/sched/signal.h>

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

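/* Example (illustrative, not part of the original file): the cast lets a
 * 64-bit value match the %llx/%llu printk format specifiers on both
 * 32-bit and 64-bit builds without format warnings:
 *
 *      dev_dbg(&oct->pci_dev->dev, "reg: 0x%016llx\n", CVM_CAST64(val));
 */
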
#define DRV_NAME "LiquidIO"

/** This structure is used by NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Bytes offset below assume worst-case of a 64-bit system.
 */
struct octnet_buf_free_info {
        /** Bytes 1-8.  Pointer to network device private structure. */
        struct lio *lio;

        /** Bytes 9-16.  Pointer to sk_buff. */
        struct sk_buff *skb;

        /** Bytes 17-24.  Pointer to gather list. */
        struct octnic_gather *g;

        /** Bytes 25-32.  Physical address of skb->data or gather list. */
        u64 dptr;

        /** Bytes 33-47. Piggybacked soft command, if any */
        struct octeon_soft_command *sc;
};

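/* A minimal sketch of intended use (an assumption, not part of this
 * header): the transmit path stashes this structure in the skb control
 * buffer so the completion path can recover and free everything:
 *
 *      struct octnet_buf_free_info *finfo;
 *
 *      finfo = (struct octnet_buf_free_info *)skb->cb;
 *      finfo->lio = lio;
 *      finfo->skb = skb;
 *
 * and on completion:
 *
 *      dev_kfree_skb_any(finfo->skb);
 */
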
/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
                                          unsigned int *pkts_compl,
                                          unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
                                        unsigned int bytes_compl);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

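/* Sketch of a completion path using these helpers (hypothetical call
 * site; 'buf', 'reqtype', and 'txq' come from the driver's completion
 * loop and are assumptions here):
 *
 *      unsigned int pkts_compl = 0, bytes_compl = 0;
 *
 *      octeon_update_tx_completion_counters(buf, reqtype,
 *                                           &pkts_compl, &bytes_compl);
 *      octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
 */
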
/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
        while (blocks) {
                cpu_to_be64s(data);
                blocks--;
                data++;
        }
}

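/* Example (illustrative): converts a buffer of 'blocks' 64-bit words
 * from host order to big-endian in place, e.g. before a command
 * structure ('cmd' is a hypothetical name) is handed to the hardware:
 *
 *      octeon_swap_8B_data((u64 *)cmd, sizeof(*cmd) / 8);
 */
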
/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
        dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
                baridx);

        if (oct->mmio[baridx].done)
                iounmap(oct->mmio[baridx].hw_addr);

        if (oct->mmio[baridx].start)
                pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
                                      int baridx, int max_map_len)
{
        u32 mapped_len = 0;

        if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
                dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
                        baridx);
                return 1;
        }

        oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
        oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

        mapped_len = oct->mmio[baridx].len;
        if (!mapped_len)
                goto err_release_region;

        if (max_map_len && (mapped_len > max_map_len))
                mapped_len = max_map_len;

        oct->mmio[baridx].hw_addr =
                ioremap(oct->mmio[baridx].start, mapped_len);
        oct->mmio[baridx].mapped_len = mapped_len;

        dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
                baridx, oct->mmio[baridx].start, mapped_len,
                oct->mmio[baridx].len);

        if (!oct->mmio[baridx].hw_addr) {
                dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
                        baridx);
                goto err_release_region;
        }
        oct->mmio[baridx].done = 1;

        return 0;

err_release_region:
        pci_release_region(oct->pci_dev, baridx * 2);
        return 1;
}

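/* Typical pairing (a sketch; the probe/teardown context is assumed):
 * map during device setup, unmap on teardown or on the error path.
 * A max_map_len of 0 maps the full BAR length:
 *
 *      if (octeon_map_pci_barx(oct, 0, 0))
 *              return 1;
 *      ...
 *      octeon_unmap_pci_barx(oct, 0);
 */
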
static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
                             u32 *alloc_size,
                             size_t *orig_ptr,
                             int numa_node)
{
        int retries = 0;
        void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES     1
        do {
                struct page *page = NULL;

                page = alloc_pages_node(numa_node,
                                        GFP_KERNEL,
                                        get_order(size));
                if (!page)
                        page = alloc_pages(GFP_KERNEL,
                                           get_order(size));
                ptr = (void *)page_address(page);
                if ((unsigned long)ptr & 0x07) {
                        __free_pages(page, get_order(size));
                        ptr = NULL;
                        /* Increment the size required if the first
                         * attempt failed.
                         */
                        if (!retries)
                                size += 7;
                }
                retries++;
        } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

        *alloc_size = size;
        *orig_ptr = (unsigned long)ptr;
        if ((unsigned long)ptr & 0x07)
                ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
        return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
                free_pages(orig_ptr, get_order(size))

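/* Sketch of the alloc/free pairing (hypothetical call site):
 *
 *      u32 alloc_size;
 *      size_t orig_ptr;
 *      void *buf;
 *
 *      buf = cnnic_numa_alloc_aligned_dma(buf_size, &alloc_size,
 *                                         &orig_ptr, numa_node);
 *      ...
 *      cnnic_free_aligned_dma(pci_dev, buf, alloc_size, orig_ptr, 0);
 *
 * The returned pointer is 8-byte aligned; orig_ptr preserves the
 * original page address so the free side releases the whole allocation.
 */
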
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
        int errno = 0;
        wait_queue_entry_t we;

        init_waitqueue_entry(&we, current);
        add_wait_queue(wait_queue, &we);
        while (!(READ_ONCE(*condition))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current)) {
                        errno = -EINTR;
                        goto out;
                }
                schedule();
        }
out:
        set_current_state(TASK_RUNNING);
        remove_wait_queue(wait_queue, &we);
        return errno;
}

/* Gives up the CPU for a timeout period.
 * Check that the condition is not true before we go to sleep for a
 * timeout period.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
                   int *condition,
                   int timeout)
{
        wait_queue_entry_t we;

        init_waitqueue_entry(&we, current);
        add_wait_queue(wait_queue, &we);
        set_current_state(TASK_INTERRUPTIBLE);
        if (!(*condition))
                schedule_timeout(timeout);
        set_current_state(TASK_RUNNING);
        remove_wait_queue(wait_queue, &we);
}

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif

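/* Worked example: ROUNDUP8(13) == (13 + 7) & 0xfffffff8 == 16, so a
 * 13-byte length is padded up to the next 8-byte boundary.
 */
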
#endif /* _OCTEON_MAIN_H_ */