#ifndef _QP_H
#define _QP_H
/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/hash.h>
#include "verbs.h"
#include "sdma.h"

#define QPN_MAX (1 << 24)
#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
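
/*
 * Each qpn_map page is a bitmap covering PAGE_SIZE * BITS_PER_BYTE QP
 * numbers.  Assuming 4 KiB pages, one page covers 32768 QPNs, so 512
 * map entries are enough for all 2^24 possible QP numbers.
 */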

/*
 * QPN-map pages start out as NULL; they are allocated upon first use
 * and are never deallocated.  This way, large bitmaps are not allocated
 * unless large numbers of QPs are used.
 */
struct qpn_map {
	void *page;
};

struct hfi1_qpn_table {
	spinlock_t lock; /* protect changes in this struct */
	unsigned flags;  /* flags for QP0/1 allocated for each port */
	u32 last;        /* last QP number allocated */
	u32 nmaps;       /* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct hfi1_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct hfi1_qp __rcu **qp_table;
	spinlock_t qpt_lock;
	struct hfi1_qpn_table qpn_table;
};

static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn)
{
	return hash_32(qpn, dev->qp_table_bits);
}

/**
 * hfi1_lookup_qpn - return the QP with the given QPN
 * @ibp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold rcu_read_lock() and keep it held until the
 * returned qp is no longer in use.
 */
static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp,
					      u32 qpn) __must_hold(RCU)
{
	struct hfi1_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(ibp->qp[qpn]);
	} else {
		struct hfi1_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		u32 n = qpn_hash(dev->qp_dev, qpn);

		for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
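
/*
 * Illustrative sketch only (not part of the driver API): a hypothetical
 * caller must bracket the lookup with rcu_read_lock()/rcu_read_unlock()
 * and finish using the returned qp before dropping the lock:
 *
 *	rcu_read_lock();
 *	qp = hfi1_lookup_qpn(ibp, qpn);
 *	if (qp)
 *		handle_packet(qp);	// hypothetical helper
 *	rcu_read_unlock();
 */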

/**
 * clear_ahg - reset ahg status in qp
 * @qp: qp pointer
 */
static inline void clear_ahg(struct hfi1_qp *qp)
{
	qp->s_hdr->ahgcount = 0;
	qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
	if (qp->s_sde && qp->s_ahgidx >= 0)
		sdma_ahg_free(qp->s_sde, qp->s_ahgidx);
	qp->s_ahgidx = -1;
}

/**
 * hfi1_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err);
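
/*
 * Illustrative sketch only: a hypothetical caller takes both QP locks
 * with interrupts disabled before moving the QP to the error state
 * (verify the lock ordering against the callers in the .c files):
 *
 *	spin_lock_irqsave(&qp->r_lock, flags);
 *	spin_lock(&qp->s_lock);
 *	lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irqrestore(&qp->r_lock, flags);
 */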

/**
 * hfi1_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata);

int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_qp_init_attr *init_attr);

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct hfi1_qp *qp);
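
/*
 * The AETH is a 32-bit big-endian field: the top 8 bits carry the
 * syndrome (ACK/credit or NAK code) and the low 24 bits carry the MSN.
 * Illustrative use in a hypothetical ACK builder, where ohdr points at
 * the other headers of the packet being built:
 *
 *	ohdr->u.aeth = hfi1_compute_aeth(qp);
 */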

/**
 * hfi1_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
			     struct ib_qp_init_attr *init_attr,
			     struct ib_udata *udata);

/**
 * hfi1_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int hfi1_destroy_qp(struct ib_qp *ibqp);

/**
 * hfi1_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth);

/**
 * hfi1_qp_init - allocate QP tables
 * @dev: a pointer to the hfi1_ibdev
 */
int hfi1_qp_init(struct hfi1_ibdev *dev);

/**
 * hfi1_qp_exit - free the QP related structures
 * @dev: a pointer to the hfi1_ibdev
 */
void hfi1_qp_exit(struct hfi1_ibdev *dev);
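
/*
 * Illustrative sketch only: hfi1_qp_init() and hfi1_qp_exit() are paired
 * over the life of the verbs device; a hypothetical caller checks the
 * init return and tears down on the error path:
 *
 *	ret = hfi1_qp_init(dev);
 *	if (ret)
 *		goto err;		// nothing to undo yet
 *	...
 *	hfi1_qp_exit(dev);		// on device teardown
 */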

/**
 * hfi1_qp_wakeup - wake up on the indicated event
 * @qp: the QP
 * @flag: the flag on which the qp is stalled
 */
void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag);

struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5);

struct qp_iter;

/**
 * qp_iter_init - initialize the iterator for the qp hash list
 * @dev: the hfi1_ibdev
 */
struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev);

/**
 * qp_iter_next - find the next qp in the hash list
 * @iter: the iterator for the qp hash list
 */
int qp_iter_next(struct qp_iter *iter);

/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
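
/*
 * Illustrative sketch only: a hypothetical debugfs dump walks the hash
 * list with the iterator; qp_iter_next() returns non-zero when there
 * are no more QPs.  The walk dereferences RCU-protected entries, so
 * hold rcu_read_lock() across it:
 *
 *	rcu_read_lock();
 *	iter = qp_iter_init(dev);
 *	if (iter) {
 *		while (!qp_iter_next(iter))
 *			qp_iter_print(s, iter);
 *		kfree(iter);
 *	}
 *	rcu_read_unlock();
 */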

/**
 * qp_comm_est - handle trap with QP established
 * @qp: the QP
 */
void qp_comm_est(struct hfi1_qp *qp);

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress without regard to the s_flags.
 *
 * It is only used in the post send path, which does not hold
 * the s_lock.
 */
static inline void _hfi1_schedule_send(struct hfi1_qp *qp)
{
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	/*
	 * Run on the CPU of the QP's SDMA engine if one is assigned,
	 * otherwise fall back to the first CPU of the device's NUMA node.
	 */
	iowait_schedule(&qp->s_iowait, ppd->hfi1_wq,
			qp->s_sde ?
			qp->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->assigned_node_id)));
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller should hold
 * the s_lock.
 */
static inline void hfi1_schedule_send(struct hfi1_qp *qp)
{
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}
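
/*
 * Illustrative sketch only: a hypothetical caller kicks the send engine
 * while holding the s_lock, which is what hfi1_send_ok() expects:
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	if (new_work_was_queued)		// hypothetical condition
 *		hfi1_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */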

void hfi1_migrate_qp(struct hfi1_qp *qp);

#endif /* _QP_H */