]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blob - drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
staging/lustre: remove CFS_MODULE_PARM
[mirror_ubuntu-kernels.git] / drivers / staging / lustre / lnet / klnds / o2iblnd / o2iblnd_modparams.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lnet/klnds/o2iblnd/o2iblnd_modparams.c
37 *
38 * Author: Eric Barton <eric@bartonsoftware.com>
39 */
40
41 #include "o2iblnd.h"
42
43 static int service = 987;
44 module_param(service, int, 0444);
45 MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)");
46
47 static int cksum = 0;
48 module_param(cksum, int, 0644);
49 MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums");
50
51 static int timeout = 50;
52 module_param(timeout, int, 0644);
53 MODULE_PARM_DESC(timeout, "timeout (seconds)");
54
55 /* Number of threads in each scheduler pool which is percpt,
56 * we will estimate reasonable value based on CPUs if it's set to zero. */
57 static int nscheds;
58 module_param(nscheds, int, 0444);
59 MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
60
61 /* NB: this value is shared by all CPTs, it can grow at runtime */
62 static int ntx = 512;
63 module_param(ntx, int, 0444);
64 MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
65
66 /* NB: this value is shared by all CPTs */
67 static int credits = 256;
68 module_param(credits, int, 0444);
69 MODULE_PARM_DESC(credits, "# concurrent sends");
70
71 static int peer_credits = 8;
72 module_param(peer_credits, int, 0444);
73 MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
74
75 static int peer_credits_hiw = 0;
76 module_param(peer_credits_hiw, int, 0444);
77 MODULE_PARM_DESC(peer_credits_hiw, "when eagerly to return credits");
78
79 static int peer_buffer_credits = 0;
80 module_param(peer_buffer_credits, int, 0444);
81 MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
82
83 static int peer_timeout = 180;
84 module_param(peer_timeout, int, 0444);
85 MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
86
87 static char *ipif_name = "ib0";
88 module_param(ipif_name, charp, 0444);
89 MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
90
91 static int retry_count = 5;
92 module_param(retry_count, int, 0644);
93 MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
94
95 static int rnr_retry_count = 6;
96 module_param(rnr_retry_count, int, 0644);
97 MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
98
99 static int keepalive = 100;
100 module_param(keepalive, int, 0644);
101 MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
102
103 static int ib_mtu = 0;
104 module_param(ib_mtu, int, 0444);
105 MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
106
107 static int concurrent_sends = 0;
108 module_param(concurrent_sends, int, 0444);
109 MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
110
111 static int map_on_demand = 0;
112 module_param(map_on_demand, int, 0444);
113 MODULE_PARM_DESC(map_on_demand, "map on demand");
114
115 /* NB: this value is shared by all CPTs, it can grow at runtime */
116 static int fmr_pool_size = 512;
117 module_param(fmr_pool_size, int, 0444);
118 MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
119
120 /* NB: this value is shared by all CPTs, it can grow at runtime */
121 static int fmr_flush_trigger = 384;
122 module_param(fmr_flush_trigger, int, 0444);
123 MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
124
125 static int fmr_cache = 1;
126 module_param(fmr_cache, int, 0444);
127 MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
128
129 /* NB: this value is shared by all CPTs, it can grow at runtime */
130 static int pmr_pool_size = 512;
131 module_param(pmr_pool_size, int, 0444);
132 MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT");
133
134 /*
135 * 0: disable failover
136 * 1: enable failover if necessary
137 * 2: force to failover (for debug)
138 */
139 static int dev_failover = 0;
140 module_param(dev_failover, int, 0444);
141 MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
142
143
144 static int require_privileged_port = 0;
145 module_param(require_privileged_port, int, 0644);
146 MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
147
148 static int use_privileged_port = 1;
149 module_param(use_privileged_port, int, 0644);
150 MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
151
/*
 * Single table of pointers to the module parameters above, so the rest
 * of the driver can read (and, for 0644 params, pick up runtime updates
 * of) every tunable through one exported object.
 */
kib_tunables_t kiblnd_tunables = {
	.kib_dev_failover      = &dev_failover,
	.kib_service           = &service,
	.kib_cksum             = &cksum,
	.kib_timeout           = &timeout,
	.kib_keepalive         = &keepalive,
	.kib_ntx               = &ntx,
	.kib_credits           = &credits,
	.kib_peertxcredits     = &peer_credits,
	.kib_peercredits_hiw   = &peer_credits_hiw,
	.kib_peerrtrcredits    = &peer_buffer_credits,
	.kib_peertimeout       = &peer_timeout,
	.kib_default_ipif      = &ipif_name,
	.kib_retry_count       = &retry_count,
	.kib_rnr_retry_count   = &rnr_retry_count,
	.kib_concurrent_sends  = &concurrent_sends,
	.kib_ib_mtu            = &ib_mtu,
	.kib_map_on_demand     = &map_on_demand,
	.kib_fmr_pool_size     = &fmr_pool_size,
	.kib_fmr_flush_trigger = &fmr_flush_trigger,
	.kib_fmr_cache         = &fmr_cache,
	.kib_pmr_pool_size     = &pmr_pool_size,
	.kib_require_priv_port = &require_privileged_port,
	.kib_use_priv_port     = &use_privileged_port,
	.kib_nscheds           = &nscheds
};
178
179 int
180 kiblnd_tunables_init (void)
181 {
182 if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
183 CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
184 *kiblnd_tunables.kib_ib_mtu);
185 return -EINVAL;
186 }
187
188 if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
189 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
190
191 if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
192 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
193
194 if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
195 *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
196
197 if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
198 *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
199
200 if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
201 *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
202
203 if (*kiblnd_tunables.kib_map_on_demand < 0 ||
204 *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
205 *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
206
207 if (*kiblnd_tunables.kib_map_on_demand == 1)
208 *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
209
210 if (*kiblnd_tunables.kib_concurrent_sends == 0) {
211 if (*kiblnd_tunables.kib_map_on_demand > 0 &&
212 *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
213 *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
214 else
215 *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
216 }
217
218 if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
219 *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
220
221 if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
222 *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
223
224 if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
225 CWARN("Concurrent sends %d is lower than message queue size: %d, "
226 "performance may drop slightly.\n",
227 *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
228 }
229
230 return 0;
231 }