/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};
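/*
 * Per queue-pair transport state.  Each qp owns a slice of a memory
 * window: tx_mw points into the peer's receive buffer (written through
 * the NTB BAR), while rx_buff is the local buffer the peer writes into.
 * rx_info (in the peer's memory) publishes how far we have consumed our
 * ring; remote_rx_info (in local memory) is where the peer publishes the
 * same for its side.
 */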
struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	bool client_ready;
	bool link_is_up;
	bool active;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	unsigned int rx_alloc_entry;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 dma_rx_prep_err;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
	u64 dma_tx_prep_err;
};
struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};
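/*
 * Scratchpad register layout used for the link handshake: each side
 * publishes its transport version, queue-pair count, memory-window count
 * and the size of every memory window, then polls the peer's values.
 */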
enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};
#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
#define DMA_RETRIES		20
#define DMA_OUT_RESOURCE_TO	50

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};
static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}
/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
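/* Dump per-qp statistics and link state to the "stats" debugfs file. */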
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nNTB QP stats:\n\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t0x%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t0x%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index (H) - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "RRI (T) - \t%u\n",
			       qp->remote_rx_info->entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "free tx - \t%u\n",
			       ntb_transport_tx_free_entry(qp));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA tx prep err - \t%llu\n",
			       qp->dma_tx_prep_err);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA rx prep err - \t%llu\n",
			       qp->dma_rx_prep_err);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using TX DMA - \t%s\n",
			       qp->tx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using RX DMA - \t%s\n",
			       qp->rx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "QP Link - \t%s\n",
			       qp->link_is_up ? "Up" : "Down");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};
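/* Spinlock-protected helpers for moving queue entries between lists. */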
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}
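/*
 * Carve this qp's receive slice out of its memory window.  QPs are
 * distributed round-robin over the MWs (see QP_TO_MW), the slice is
 * split into rx_max_entry frames of rx_max_frame bytes, and the trailing
 * ntb_rx_info word of the slice receives the peer's ring index
 * (remote_rx_info).
 */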
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	struct ntb_dev *ndev = nt->ndev;
	struct ntb_queue_entry *entry;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;
	int node;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	/*
	 * Checking to see if we have more entries than the default.
	 * We should add additional entries if that is the case so we
	 * can be in sync with the transport frames.
	 */
	node = dev_to_node(&ndev->dev);
	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			return -ENOMEM;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
		qp->rx_alloc_entry++;
	}

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}
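/* Free a memory window's receive buffer and clear its translation. */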
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}
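/*
 * Allocate a DMA-coherent receive buffer for a memory window, rounded up
 * to the hardware's translation alignment, and program the NTB xlat
 * register so the peer's writes land in it.
 */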
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;
	qp->active = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
	qp->dma_tx_prep_err = 0;
	qp->dma_rx_prep_err = 0;
}
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}
static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}
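/*
 * Link bring-up: write our version, qp count, mw count and mw sizes to
 * the peer's scratchpads, then read back the peer's values.  Only when
 * both sides agree are the memory windows sized and the per-qp link work
 * kicked off; otherwise retry until the link comes up.
 */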
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc = 0, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	/* if there's an actual failure, we should just bail */
	if (rc < 0) {
		ntb_link_disable(ndev);
		return;
	}

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;
		qp->active = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
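/*
 * One-time qp init: compute this qp's transmit slice of the peer-facing
 * memory window, create the debugfs stats file, and set up the rx/tx
 * lists and the receive-completion tasklet.
 */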
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int node;
	int rc, i;

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err1;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err2;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err3:
	ntb_clear_ctx(ndev);
err2:
	kfree(nt->qp_vec);
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}
static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}
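/*
 * Retire completed entries from rx_post_q in order: acknowledge each
 * frame to the peer by advancing rx_info->entry, then hand the payload
 * to the client's rx_handler.
 */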
static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}
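/*
 * Receive path copy: payloads of copy_bytes or more are offloaded to the
 * DMA engine when a channel is available and the buffers are suitably
 * aligned; anything else falls back to the CPU memcpy path above.
 */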
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	int retries = 0;

	len = entry->len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_rx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}
static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->rx_dma_chan)
		dma_async_issue_pending(qp->rx_dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	}
}
static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry);
}
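/*
 * Transmit path counterpart of ntb_async_rx(): large, aligned payloads
 * are copied into the peer's memory window by the DMA engine; everything
 * else goes through ntb_memcpy_tx().
 */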
static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->tx_dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;
	int retries = 0;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
						     len, DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_tx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}
static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
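/*
 * Tell the peer this qp is going away by sending a zero-length packet
 * flagged LINK_DOWN_FLAG, then reset the local qp state.
 */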
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}
static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}
fce8a7bb
JM
1661/**
1662 * ntb_transport_create_queue - Create a new NTB transport layer queue
1663 * @rx_handler: receive callback function
1664 * @tx_handler: transmit callback function
1665 * @event_handler: event callback function
1666 *
1667 * Create a new NTB transport layer queue and provide the queue with a callback
1668 * routine for both transmit and receive. The receive callback routine will be
1669 * used to pass up data when the transport has received it on the queue. The
1670 * transmit callback routine will be called when the transport has completed the
1671 * transmission of the data on the queue and the data is ready to be freed.
1672 *
1673 * RETURNS: pointer to newly created ntb_queue, NULL on error.
1674 */
1675struct ntb_transport_qp *
e26a5843 1676ntb_transport_create_queue(void *data, struct device *client_dev,
fce8a7bb
JM
1677 const struct ntb_queue_handlers *handlers)
1678{
e26a5843
AH
1679 struct ntb_dev *ndev;
1680 struct pci_dev *pdev;
1681 struct ntb_transport_ctx *nt;
fce8a7bb
JM
1682 struct ntb_queue_entry *entry;
1683 struct ntb_transport_qp *qp;
e26a5843 1684 u64 qp_bit;
fce8a7bb 1685 unsigned int free_queue;
1199aa61
AH
1686 dma_cap_mask_t dma_mask;
1687 int node;
e26a5843 1688 int i;
fce8a7bb 1689
e26a5843
AH
1690 ndev = dev_ntb(client_dev->parent);
1691 pdev = ndev->pdev;
1692 nt = ndev->ctx;
fce8a7bb 1693
1199aa61
AH
1694 node = dev_to_node(&ndev->dev);
1695
fce8a7bb
JM
1696 free_queue = ffs(nt->qp_bitmap);
1697 if (!free_queue)
1698 goto err;
1699
1700 /* decrement free_queue to make it zero based */
1701 free_queue--;
1702
e26a5843
AH
1703 qp = &nt->qp_vec[free_queue];
1704 qp_bit = BIT_ULL(qp->qp_num);
1705
1706 nt->qp_bitmap_free &= ~qp_bit;
fce8a7bb 1707
fce8a7bb
JM
1708 qp->cb_data = data;
1709 qp->rx_handler = handlers->rx_handler;
1710 qp->tx_handler = handlers->tx_handler;
1711 qp->event_handler = handlers->event_handler;
1712
1199aa61
AH
1713 dma_cap_zero(dma_mask);
1714 dma_cap_set(DMA_MEMCPY, dma_mask);
1715
a41ef053 1716 if (use_dma) {
569410ca
DJ
1717 qp->tx_dma_chan =
1718 dma_request_channel(dma_mask, ntb_dma_filter_fn,
1719 (void *)(unsigned long)node);
1720 if (!qp->tx_dma_chan)
1721 dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
1722
1723 qp->rx_dma_chan =
1724 dma_request_channel(dma_mask, ntb_dma_filter_fn,
1725 (void *)(unsigned long)node);
1726 if (!qp->rx_dma_chan)
1727 dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
a41ef053 1728 } else {
569410ca
DJ
1729 qp->tx_dma_chan = NULL;
1730 qp->rx_dma_chan = NULL;
a41ef053 1731 }
569410ca
DJ
1732
1733 dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
1734 qp->tx_dma_chan ? "DMA" : "CPU");
1735
1736 dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
1737 qp->rx_dma_chan ? "DMA" : "CPU");
282a2fee 1738
        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err1;

                entry->qp = qp;
                ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
        }
        qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;

        for (i = 0; i < qp->tx_max_entry; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        goto err2;

                entry->qp = qp;
                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                             &qp->tx_free_q);
        }

        ntb_db_clear(qp->ndev, qp_bit);
        ntb_db_clear_mask(qp->ndev, qp_bit);

        dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

        return qp;

err2:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
err1:
        qp->rx_alloc_entry = 0;
        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
        if (qp->tx_dma_chan)
                dma_release_channel(qp->tx_dma_chan);
        if (qp->rx_dma_chan)
                dma_release_channel(qp->rx_dma_chan);
        nt->qp_bitmap_free |= qp_bit;
err:
        return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);

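/*
 * Example: a minimal sketch of how a client might create a queue and bring
 * its link up. The names my_rx, my_tx, my_event, my_handlers, and
 * my_client_probe are hypothetical and not part of this driver; see
 * drivers/net/ntb_netdev.c for a real in-tree user of this API.
 */
static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
                  void *data, int len)
{
        /* data points at a buffer posted with ntb_transport_rx_enqueue() */
}

static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
                  void *data, int len)
{
        /* the buffer passed to ntb_transport_tx_enqueue() may be freed now */
}

static void my_event(void *qp_data, int link_is_up)
{
        /* react to qp link state changes */
}

static const struct ntb_queue_handlers my_handlers = {
        .rx_handler = my_rx,
        .tx_handler = my_tx,
        .event_handler = my_event,
};

static int my_client_probe(struct device *client_dev)
{
        struct ntb_transport_qp *qp;

        qp = ntb_transport_create_queue(NULL, client_dev, &my_handlers);
        if (!qp)
                return -EIO;

        /* tell the transport this client is ready to use the queue */
        ntb_transport_link_up(qp);
        return 0;
}
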
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
        u64 qp_bit;

        if (!qp)
                return;

        pdev = qp->ndev->pdev;

        qp->active = false;

        if (qp->tx_dma_chan) {
                struct dma_chan *chan = qp->tx_dma_chan;
                /* Setting the dma_chan to NULL forces any new traffic to be
                 * processed by the CPU instead of the DMA engine
                 */
                qp->tx_dma_chan = NULL;

                /* Try to be nice and wait for any queued DMA engine
                 * transactions to process before smashing it with a rock
                 */
                dma_sync_wait(chan, qp->last_cookie);
                dmaengine_terminate_all(chan);
                dma_release_channel(chan);
        }

        if (qp->rx_dma_chan) {
                struct dma_chan *chan = qp->rx_dma_chan;
                /* Setting the dma_chan to NULL forces any new traffic to be
                 * processed by the CPU instead of the DMA engine
                 */
                qp->rx_dma_chan = NULL;

                /* Try to be nice and wait for any queued DMA engine
                 * transactions to process before smashing it with a rock
                 */
                dma_sync_wait(chan, qp->last_cookie);
                dmaengine_terminate_all(chan);
                dma_release_channel(chan);
        }

        qp_bit = BIT_ULL(qp->qp_num);

        ntb_db_set_mask(qp->ndev, qp_bit);
        tasklet_kill(&qp->rxc_db_work);

        cancel_delayed_work_sync(&qp->link_work);

        qp->cb_data = NULL;
        qp->rx_handler = NULL;
        qp->tx_handler = NULL;
        qp->event_handler = NULL;

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
                dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
                kfree(entry);
        }

        while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
                dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
                kfree(entry);
        }

        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);

        qp->transport->qp_bitmap_free |= qp_bit;

        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

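/*
 * Example: a sketch of the teardown order a client might use on remove,
 * assuming its rx buffers were kmalloc()ed. my_client_remove is
 * hypothetical; the drain loop mirrors how ntb_netdev reclaims posted
 * buffers with ntb_transport_rx_remove() before freeing the queue.
 */
static void my_client_remove(struct ntb_transport_qp *qp)
{
        unsigned int len;
        void *buf;

        /* stop new traffic and take the qp link down first */
        ntb_transport_link_down(qp);

        /* reclaim any rx buffers still posted before freeing the queue */
        while ((buf = ntb_transport_rx_remove(qp, &len)))
                kfree(buf);

        ntb_transport_free_queue(qp);
}
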
/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue from which the entry is dequeued
 * @len: pointer to variable to write the dequeued buffer's length
 *
 * Dequeues unused buffers from receive queue. Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
        struct ntb_queue_entry *entry;
        void *buf;

        if (!qp || qp->client_ready)
                return NULL;

        entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
        if (!entry)
                return NULL;

        buf = entry->cb_data;
        *len = entry->len;

        ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

        return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
                             unsigned int len)
{
        struct ntb_queue_entry *entry;

        if (!qp)
                return -EINVAL;

        entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
        if (!entry)
                return -ENOMEM;

        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
        entry->flags = 0;

        ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

        if (qp->active)
                tasklet_schedule(&qp->rxc_db_work);

        return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

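/*
 * Example: a sketch of the usual rx pattern, in which the rx_handler
 * consumes the payload and immediately re-posts a buffer so the rx ring
 * never runs dry. my_rx2 and MY_BUF_LEN are hypothetical; ntb_netdev
 * similarly re-posts a buffer from its rx_handler.
 */
#define MY_BUF_LEN 2048        /* hypothetical per-buffer size */

static void my_rx2(struct ntb_transport_qp *qp, void *qp_data,
                   void *data, int len)
{
        /* ... process len bytes at data ... */

        /* then hand the buffer straight back to the rx pend queue */
        ntb_transport_rx_enqueue(qp, data, data, MY_BUF_LEN);
}
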
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
                             unsigned int len)
{
        struct ntb_queue_entry *entry;
        int rc;

        if (!qp || !qp->link_is_up || !len)
                return -EINVAL;

        entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
        if (!entry) {
                qp->tx_err_no_buf++;
                return -EBUSY;
        }

        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
        entry->flags = 0;

        rc = ntb_process_tx(qp, entry);
        if (rc)
                ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
                             &qp->tx_free_q);

        return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

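/*
 * Example: a sketch of a tx path that applies backpressure before the ring
 * fills. my_send is hypothetical; checking ntb_transport_tx_free_entry()
 * (defined later in this file) before enqueueing is the same approach
 * ntb_netdev uses to decide when to stop its tx queue.
 */
static int my_send(struct ntb_transport_qp *qp, void *buf, unsigned int len)
{
        /* leave headroom so the ring is never driven completely full */
        if (ntb_transport_tx_free_entry(qp) < 2)
                return -EBUSY;

        /* on success, buf belongs to the transport until tx_handler runs */
        return ntb_transport_tx_enqueue(qp, buf, buf, len);
}
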
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
        if (!qp)
                return;

        qp->client_ready = true;

        if (qp->transport->link_is_up)
                schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
        int val;

        if (!qp)
                return;

        qp->client_ready = false;

        val = ntb_spad_read(qp->ndev, QP_LINKS);

        ntb_peer_spad_write(qp->ndev, QP_LINKS,
                            val & ~BIT(qp->qp_num));

        if (qp->link_is_up)
                ntb_send_link_down(qp);
        else
                cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
        if (!qp)
                return false;

        return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
        if (!qp)
                return 0;

        return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
        unsigned int max_size;
        unsigned int copy_align;
        struct dma_chan *rx_chan, *tx_chan;

        if (!qp)
                return 0;

        rx_chan = qp->rx_dma_chan;
        tx_chan = qp->tx_dma_chan;

        /* copy_align is the log2 of the alignment the DMA device requires */
        copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
                         tx_chan ? tx_chan->device->copy_align : 0);

        /* If DMA engine usage is possible, try to find the max size for that */
        max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
        max_size = round_down(max_size, 1 << copy_align);

        return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

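/*
 * Example: sizing rx buffers from the qp's max payload before posting them.
 * A minimal sketch; my_post_rx_bufs and the kmalloc-backed buffers are
 * hypothetical stand-ins for a client's own buffer management.
 */
static int my_post_rx_bufs(struct ntb_transport_qp *qp, int count)
{
        unsigned int len = ntb_transport_max_size(qp);
        void *buf;
        int i, rc;

        for (i = 0; i < count; i++) {
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                rc = ntb_transport_rx_enqueue(qp, buf, buf, len);
                if (rc) {
                        kfree(buf);
                        return rc;
                }
        }

        return 0;
}
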
/**
 * ntb_transport_tx_free_entry - Query free tx entries on a qp
 * @qp: NTB transport layer queue to be queried
 *
 * RETURNS: the number of free tx entries remaining in the ring
 */
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
        unsigned int head = qp->tx_index;
        unsigned int tail = qp->remote_rx_info->entry;

        /* e.g. head = 3, tail = 5, tx_max_entry = 8: 5 - 3 = 2 entries free;
         * head = 5, tail = 3: 8 + 3 - 5 = 6 entries free
         */
        return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);

static void ntb_transport_doorbell_callback(void *data, int vector)
{
        struct ntb_transport_ctx *nt = data;
        struct ntb_transport_qp *qp;
        u64 db_bits;
        unsigned int qp_num;

        db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
                   ntb_db_vector_mask(nt->ndev, vector));

        while (db_bits) {
                qp_num = __ffs(db_bits);
                qp = &nt->qp_vec[qp_num];

                if (qp->active)
                        tasklet_schedule(&qp->rxc_db_work);

                db_bits &= ~BIT_ULL(qp_num);
        }
}

static const struct ntb_ctx_ops ntb_transport_ops = {
        .link_event = ntb_transport_event_callback,
        .db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
        .ops = {
                .probe = ntb_transport_probe,
                .remove = ntb_transport_free,
        },
};

static int __init ntb_transport_init(void)
{
        int rc;

        pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

        if (debugfs_initialized())
                nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

        rc = bus_register(&ntb_transport_bus);
        if (rc)
                goto err_bus;

        rc = ntb_register_client(&ntb_transport_client);
        if (rc)
                goto err_client;

        return 0;

err_client:
        bus_unregister(&ntb_transport_bus);
err_bus:
        debugfs_remove_recursive(nt_debugfs_dir);
        return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
        debugfs_remove_recursive(nt_debugfs_dir);

        ntb_unregister_client(&ntb_transport_client);
        bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);