/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */

/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here. Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file. Similarly, all flags and magic
 * values in those structs should also be declared here. This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures. Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary. Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources. This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions. Some structs may have padding fields that
 * can be used. Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 *
 * @{
 */

#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/types.h"

/* Defn's shared with user-space. */
#include "lustre_user.h"
#include "lustre_errno.h"
#include "../lustre_ver.h"

/*
 * GENERAL STUFF
 */
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL is for incoming replies on the FOO
 * FOO_BULK_PORTAL is for incoming bulk on the FOO
 */

/* Lustre service names follow the format
 * service name + MDT + seq name
 */
#define LUSTRE_MDT_MAXNAMELEN 80

#define CONNMGR_REQUEST_PORTAL 1
#define CONNMGR_REPLY_PORTAL 2
/*#define OSC_REQUEST_PORTAL 3 */
#define OSC_REPLY_PORTAL 4
/*#define OSC_BULK_PORTAL 5 */
#define OST_IO_PORTAL 6
#define OST_CREATE_PORTAL 7
#define OST_BULK_PORTAL 8
/*#define MDC_REQUEST_PORTAL 9 */
#define MDC_REPLY_PORTAL 10
/*#define MDC_BULK_PORTAL 11 */
#define MDS_REQUEST_PORTAL 12
/*#define MDS_REPLY_PORTAL 13 */
#define MDS_BULK_PORTAL 14
#define LDLM_CB_REQUEST_PORTAL 15
#define LDLM_CB_REPLY_PORTAL 16
#define LDLM_CANCEL_REQUEST_PORTAL 17
#define LDLM_CANCEL_REPLY_PORTAL 18
/*#define PTLBD_REQUEST_PORTAL 19 */
/*#define PTLBD_REPLY_PORTAL 20 */
/*#define PTLBD_BULK_PORTAL 21 */
#define MDS_SETATTR_PORTAL 22
#define MDS_READPAGE_PORTAL 23
#define OUT_PORTAL 24

#define MGC_REPLY_PORTAL 25
#define MGS_REQUEST_PORTAL 26
#define MGS_REPLY_PORTAL 27
#define OST_REQUEST_PORTAL 28
#define FLD_REQUEST_PORTAL 29
#define SEQ_METADATA_PORTAL 30
#define SEQ_DATA_PORTAL 31
#define SEQ_CONTROLLER_PORTAL 32
#define MGS_BULK_PORTAL 33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
 * n8851@cray.com
 */

/* packet types */
#define PTL_RPC_MSG_REQUEST 4711
#define PTL_RPC_MSG_ERR 4712
#define PTL_RPC_MSG_REPLY 4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
#define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B

#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2

#define PTLRPC_MSG_VERSION 0x00000003
#define LUSTRE_VERSION_MASK 0xffff0000
#define LUSTRE_OBD_VERSION 0x00010000
#define LUSTRE_MDS_VERSION 0x00020000
#define LUSTRE_OST_VERSION 0x00030000
#define LUSTRE_DLM_VERSION 0x00040000
#define LUSTRE_LOG_VERSION 0x00050000
#define LUSTRE_MGS_VERSION 0x00060000

/**
 * Describes a range of sequence numbers: lsr_start is included in the
 * range, lsr_end is not.
 * The same structure is used in the fld module, where the lsr_index field
 * holds the mdt id of the home mdt.
 */
struct lu_seq_range {
        __u64 lsr_start;
        __u64 lsr_end;
        __u32 lsr_index;
        __u32 lsr_flags;
};

struct lu_seq_range_array {
        __u32 lsra_count;
        __u32 lsra_padding;
        struct lu_seq_range lsra_lsr[0];
};

#define LU_SEQ_RANGE_MDT 0x0
#define LU_SEQ_RANGE_OST 0x1
#define LU_SEQ_RANGE_ANY 0x3

#define LU_SEQ_RANGE_MASK 0x3

/** \defgroup lu_fid lu_fid
 * @{
 */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattr.
 */
enum lma_compat {
        LMAC_HSM = 0x00000001,
/*      LMAC_SOM = 0x00000002, obsolete since 2.8.0 */
        LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
        LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
                                       * under /O/<seq>/d<x>.
                                       */
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
        LMAI_RELEASED = 0x00000001, /* file is released */
        LMAI_AGENT = 0x00000002, /* agent inode */
        LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
                                          * is on the remote MDT
                                          */
};

#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)

/**
 * fid constants
 */
enum {
        /** LASTID file has zero OID */
        LUSTRE_FID_LASTID_OID = 0UL,
        /** initial fid id value */
        LUSTRE_FID_INIT_OID = 1UL
};

/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
        return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
        return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
        return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
        memset(fid, 0, sizeof(*fid));
}

static inline __u64 fid_ver_oid(const struct lu_fid *fid)
{
        return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
}

/* copytool uses a 32b bitmask field to encode archive-Ids during
 * registration with the MDT thru kuc.
 * archive num = 0 => all
 * archive num from 1 to 32
 */
#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
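
/*
 * Illustrative sketch (not part of the wire protocol): a copytool that
 * serves archives 2 and 5 could register with the bitmask below, assuming
 * the usual mapping of archive number N to bit (N - 1); a mask of 0 means
 * "all archives":
 *
 *      __u32 archive_mask = (1U << (2 - 1)) | (1U << (5 - 1));
 */
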
/**
 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
 * used for other purposes and not risk collisions with existing inodes.
 *
 * Different FID Format
 * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
 */
enum fid_seq {
        FID_SEQ_OST_MDT0 = 0,
        FID_SEQ_LLOG = 1, /* unnamed llogs */
        FID_SEQ_ECHO = 2,
        FID_SEQ_OST_MDT1 = 3,
        FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
        FID_SEQ_LLOG_NAME = 10, /* named llogs */
        FID_SEQ_RSVD = 11,
        FID_SEQ_IGIF = 12,
        FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
        FID_SEQ_IDIF = 0x100000000ULL,
        FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
        /* Normal FID sequence starts from this value, i.e. 1<<33 */
        FID_SEQ_START = 0x200000000ULL,
        /* sequence for local pre-defined FIDs listed in local_oid */
        FID_SEQ_LOCAL_FILE = 0x200000001ULL,
        FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
        /* sequence is used for local named objects FIDs generated
         * by the local_object_storage library
         */
        FID_SEQ_LOCAL_NAME = 0x200000003ULL,
        /* Because the current FLD only caches the fid sequence, not the
         * oid, on the client side, if a FID needs to be exposed to
         * clients it must be ensured that all FIDs under one
         * sequence are located on one MDT.
         */
        FID_SEQ_SPECIAL = 0x200000004ULL,
        FID_SEQ_QUOTA = 0x200000005ULL,
        FID_SEQ_QUOTA_GLB = 0x200000006ULL,
        FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
        FID_SEQ_NORMAL = 0x200000400ULL,
        FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
};

#define OBIF_OID_MAX_BITS 32
#define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS 48
#define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)

/** OID for FID_SEQ_SPECIAL */
enum special_oid {
        /* Big Filesystem Lock to serialize rename operations */
        FID_OID_SPECIAL_BFL = 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
        FID_OID_DOT_LUSTRE = 1UL,
        FID_OID_DOT_LUSTRE_OBF = 2UL,
};

static inline bool fid_seq_is_mdt0(__u64 seq)
{
        return (seq == FID_SEQ_OST_MDT0);
}

static inline bool fid_seq_is_mdt(__u64 seq)
{
        return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
};

static inline bool fid_seq_is_echo(__u64 seq)
{
        return (seq == FID_SEQ_ECHO);
}

static inline bool fid_is_echo(const struct lu_fid *fid)
{
        return fid_seq_is_echo(fid_seq(fid));
}

static inline bool fid_seq_is_llog(__u64 seq)
{
        return (seq == FID_SEQ_LLOG);
}

static inline bool fid_is_llog(const struct lu_fid *fid)
{
        /* file with OID == 0 is not llog but contains last oid */
        return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline bool fid_seq_is_rsvd(__u64 seq)
{
        return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
};

static inline bool fid_seq_is_special(__u64 seq)
{
        return seq == FID_SEQ_SPECIAL;
};

static inline bool fid_seq_is_local_file(__u64 seq)
{
        return seq == FID_SEQ_LOCAL_FILE ||
               seq == FID_SEQ_LOCAL_NAME;
};

static inline bool fid_seq_is_root(__u64 seq)
{
        return seq == FID_SEQ_ROOT;
}

static inline bool fid_seq_is_dot(__u64 seq)
{
        return seq == FID_SEQ_DOT_LUSTRE;
}

static inline bool fid_seq_is_default(__u64 seq)
{
        return seq == FID_SEQ_LOV_DEFAULT;
}

static inline bool fid_is_mdt0(const struct lu_fid *fid)
{
        return fid_seq_is_mdt0(fid_seq(fid));
}

static inline void lu_root_fid(struct lu_fid *fid)
{
        fid->f_seq = FID_SEQ_ROOT;
        fid->f_oid = 1;
        fid->f_ver = 0;
}

/**
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline bool fid_seq_is_igif(__u64 seq)
{
        return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline bool fid_is_igif(const struct lu_fid *fid)
{
        return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline bool fid_seq_is_idif(__u64 seq)
{
        return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline bool fid_is_idif(const struct lu_fid *fid)
{
        return fid_seq_is_idif(fid_seq(fid));
}

static inline bool fid_is_local_file(const struct lu_fid *fid)
{
        return fid_seq_is_local_file(fid_seq(fid));
}

static inline bool fid_seq_is_norm(__u64 seq)
{
        return (seq >= FID_SEQ_NORMAL);
}

static inline bool fid_is_norm(const struct lu_fid *fid)
{
        return fid_seq_is_norm(fid_seq(fid));
}

/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
{
        return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
{
        return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
        return (fid_seq(fid) >> 16) & 0xffff;
}
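
/*
 * Worked example (illustrative only): for OST index 3 and object id
 * 0x123456789ULL, the IDIF sequence packs the OST index and the top 16
 * bits of the id:
 *
 *      fid_idif_seq(0x123456789ULL, 3) == 0x100000000ULL | (3 << 16) | 0x1
 *                                      == 0x100030001ULL
 *
 * With f_oid holding the low 32 bits (0x23456789) and f_ver == 0, the
 * original object id is recovered by
 *
 *      fid_idif_id(0x100030001ULL, 0x23456789, 0) == 0x123456789ULL
 */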

/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline __u64 ostid_seq(const struct ost_id *ostid)
{
        if (fid_seq_is_mdt0(ostid->oi.oi_seq))
                return FID_SEQ_OST_MDT0;

        if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
                return FID_SEQ_LOV_DEFAULT;

        if (fid_is_idif(&ostid->oi_fid))
                return FID_SEQ_OST_MDT0;

        return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
        if (fid_seq_is_mdt0(ostid->oi.oi_seq))
                return ostid->oi.oi_id & IDIF_OID_MASK;

        if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
                return ostid->oi.oi_id;

        if (fid_is_idif(&ostid->oi_fid))
                return fid_idif_id(fid_seq(&ostid->oi_fid),
                                   fid_oid(&ostid->oi_fid), 0);

        return fid_oid(&ostid->oi_fid);
}
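
/*
 * Usage sketch (illustrative only): a legacy pre-FID ost_id carries the
 * "group 0" sequence and a plain object id on the wire, and the accessors
 * above decode it as such:
 *
 *      struct ost_id oi = { };
 *
 *      oi.oi.oi_seq = FID_SEQ_OST_MDT0;
 *      oi.oi.oi_id = 1234;
 *      ostid_seq(&oi) == FID_SEQ_OST_MDT0 and ostid_id(&oi) == 1234
 */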

static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
        if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
                oi->oi.oi_seq = seq;
        } else {
                oi->oi_fid.f_seq = seq;
                /* Note: if f_oid + f_ver is zero, we need to initialize it
                 * to 1, otherwise ostid_seq will treat this
                 * as an old ostid (oi_seq == 0)
                 */
                if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
                        oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
        }
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
        ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
        ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
        ostid_set_seq(oi, FID_SEQ_LLOG);
}

/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
        if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
                if (oid >= IDIF_MAX_OID) {
                        CERROR("Too large OID %#llx to set MDT0 " DOSTID "\n",
                               oid, POSTID(oi));
                        return;
                }
                oi->oi.oi_id = oid;
        } else if (fid_is_idif(&oi->oi_fid)) {
                if (oid >= IDIF_MAX_OID) {
                        CERROR("Too large OID %#llx to set IDIF " DOSTID "\n",
                               oid, POSTID(oi));
                        return;
                }
                oi->oi_fid.f_seq = fid_idif_seq(oid,
                                                fid_idif_ost_idx(&oi->oi_fid));
                oi->oi_fid.f_oid = oid;
                oi->oi_fid.f_ver = oid >> 48;
        } else {
                if (oid >= OBIF_MAX_OID) {
                        CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
                        return;
                }
                oi->oi_fid.f_oid = oid;
        }
}
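
/*
 * Usage sketch (illustrative only): because ostid_set_id() inspects the
 * already-stored sequence to decide which union member to fill, the
 * sequence must always be set first:
 *
 *      struct ost_id oi = { };
 *
 *      ostid_set_seq_mdt0(&oi);
 *      ostid_set_id(&oi, 1234);
 */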

static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
{
        if (unlikely(fid_seq_is_igif(fid->f_seq))) {
                CERROR("bad IGIF, "DFID"\n", PFID(fid));
                return -EBADF;
        }

        if (fid_is_idif(fid)) {
                if (oid >= IDIF_MAX_OID) {
                        CERROR("Too large OID %#llx to set IDIF " DFID "\n",
                               (unsigned long long)oid, PFID(fid));
                        return -EBADF;
                }
                fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
                fid->f_oid = oid;
                fid->f_ver = oid >> 48;
        } else {
                if (oid >= OBIF_MAX_OID) {
                        CERROR("Too large OID %#llx to set REG " DFID "\n",
                               (unsigned long long)oid, PFID(fid));
                        return -EBADF;
                }
                fid->f_oid = oid;
        }
        return 0;
}

/**
 * Unpack an OST object id/seq (group) into a FID. This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged. Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss. For reference see:
 * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
                               __u32 ost_idx)
{
        __u64 seq = ostid_seq(ostid);

        if (ost_idx > 0xffff) {
                CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
                       ost_idx);
                return -EBADF;
        }

        if (fid_seq_is_mdt0(seq)) {
                __u64 oid = ostid_id(ostid);

                /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
                 * that we map into the IDIF namespace. It allows up to 2^48
                 * objects per OST, as this is the object namespace that has
                 * been in production for years. This can handle create rates
                 * of 1M objects/s/OST for 9 years, or combinations thereof.
                 */
                if (oid >= IDIF_MAX_OID) {
                        CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
                               POSTID(ostid), ost_idx);
                        return -EBADF;
                }
                fid->f_seq = fid_idif_seq(oid, ost_idx);
                /* truncate to 32 bits by assignment */
                fid->f_oid = oid;
                /* in theory, not currently used */
                fid->f_ver = oid >> 48;
        } else if (likely(!fid_seq_is_default(seq))) {
                /* This is either an IDIF object, which identifies objects
                 * across all OSTs, or a regular FID. The IDIF namespace maps
                 * legacy OST objects into the FID namespace. In both cases,
                 * we just pass the FID through, no conversion needed.
                 */
                if (ostid->oi_fid.f_ver != 0) {
                        CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
                               POSTID(ostid), ost_idx);
                        return -EBADF;
                }
                *fid = ostid->oi_fid;
        }

        return 0;
}

/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
        if (unlikely(fid_seq_is_igif(fid->f_seq))) {
                CERROR("bad IGIF, "DFID"\n", PFID(fid));
                return -EBADF;
        }

        if (fid_is_idif(fid)) {
                ostid_set_seq_mdt0(ostid);
                ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
                                                fid_ver(fid)));
        } else {
                ostid->oi_fid = *fid;
        }

        return 0;
}
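
/*
 * Usage sketch (illustrative only): round-tripping a legacy group-0 object
 * through the FID namespace.  ostid_to_fid() maps it into IDIF for the
 * given OST index, and fid_to_ostid() packs it back into id/seq form:
 *
 *      struct ost_id oi = { };
 *      struct lu_fid fid;
 *
 *      ostid_set_seq_mdt0(&oi);
 *      ostid_set_id(&oi, 1234);
 *      if (ostid_to_fid(&fid, &oi, 0) == 0)
 *              fid_to_ostid(&fid, &oi);
 */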

/* Check whether the fid is for LAST_ID */
static inline bool fid_is_last_id(const struct lu_fid *fid)
{
        return (fid_oid(fid) == 0);
}

/**
 * Get inode number from an igif.
 * \param fid an igif to get inode number from.
 * \return inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
        return fid_seq(fid);
}

/**
 * Get inode generation from an igif.
 * \param fid an igif to get inode generation from.
 * \return inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
        return fid_oid(fid);
}

/**
 * Build igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
        fid->f_seq = ino;
        fid->f_oid = gen;
        fid->f_ver = 0;
}
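
/*
 * Usage sketch (illustrative only): wrapping a 1.8-era inode into an IGIF
 * FID and reading it back; the inode number becomes the sequence and the
 * generation becomes the object id:
 *
 *      struct lu_fid fid;
 *
 *      lu_igif_build(&fid, 0x2000, 5);
 *      lu_igif_ino(&fid) == 0x2000 and lu_igif_gen(&fid) == 5
 */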

/*
 * Fids are transmitted across the network (in the sender byte-ordering),
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
        dst->f_seq = cpu_to_le64(fid_seq(src));
        dst->f_oid = cpu_to_le32(fid_oid(src));
        dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
        dst->f_seq = le64_to_cpu(fid_seq(src));
        dst->f_oid = le32_to_cpu(fid_oid(src));
        dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
        dst->f_seq = cpu_to_be64(fid_seq(src));
        dst->f_oid = cpu_to_be32(fid_oid(src));
        dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
        dst->f_seq = be64_to_cpu(fid_seq(src));
        dst->f_oid = be32_to_cpu(fid_oid(src));
        dst->f_ver = be32_to_cpu(fid_ver(src));
}

static inline bool fid_is_sane(const struct lu_fid *fid)
{
        return fid &&
               ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
                fid_is_igif(fid) || fid_is_idif(fid) ||
                fid_seq_is_rsvd(fid_seq(fid)));
}

static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
        return memcmp(f0, f1, sizeof(*f0)) == 0;
}

#define __diff_normalize(val0, val1)                            \
({                                                              \
        typeof(val0) __val0 = (val0);                           \
        typeof(val1) __val1 = (val1);                           \
                                                                \
        (__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1);      \
})

static inline int lu_fid_cmp(const struct lu_fid *f0,
                             const struct lu_fid *f1)
{
        return
                __diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
                __diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
                __diff_normalize(fid_ver(f0), fid_ver(f1));
}
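
/*
 * Usage sketch (illustrative only): lu_fid_cmp() gives a total order by
 * sequence, then object id, then version, so two FIDs in the same sequence
 * compare by f_oid:
 *
 *      struct lu_fid a = { .f_seq = FID_SEQ_NORMAL, .f_oid = 1 };
 *      struct lu_fid b = { .f_seq = FID_SEQ_NORMAL, .f_oid = 2 };
 *
 *      lu_fid_cmp(&a, &b) == -1
 */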

static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
                                   struct ost_id *dst_oi)
{
        if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
                dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
                dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
        } else {
                fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
        }
}

static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
                                   struct ost_id *dst_oi)
{
        if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
                dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
                dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
        } else {
                fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
        }
}

/** @} lu_fid */

/** \defgroup lu_dir lu_dir
 * @{
 */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow the directory entry header in the order they appear in
 * this enumeration.
 */
enum lu_dirent_attrs {
        LUDA_FID = 0x0001,
        LUDA_TYPE = 0x0002,
        LUDA_64BITHASH = 0x0004,
};

/**
 * Layout of readdir pages, as transmitted on wire.
 */
struct lu_dirent {
        /** valid if LUDA_FID is set. */
        struct lu_fid lde_fid;
        /** a unique entry identifier: a hash or an offset. */
        __u64 lde_hash;
        /** total record length, including all attributes. */
        __u16 lde_reclen;
        /** name length */
        __u16 lde_namelen;
        /** optional variable size attributes following this entry.
         * taken from enum lu_dirent_attrs.
         */
        __u32 lde_attrs;
        /** name is followed by the attributes indicated in ->ldp_attrs, in
         * their natural order. After the last attribute, padding bytes are
         * added to make ->lde_reclen a multiple of 8.
         */
        char lde_name[0];
};

/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way.
 * It is assumed that the consumer of an attribute knows its format. This
 * means that it is impossible to skip over an unknown attribute, except by
 * skipping over all remaining attributes (by using ->lde_reclen), which is
 * not too constraining, because new server versions will append new
 * attributes at the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will be almost always requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To have compatibility with 1.8, let's have fid in lu_dirent struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
        __u16 lt_type;
};

#ifndef IFSHIFT
#define IFSHIFT 12
#endif

#ifndef IFTODT
#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
#endif
#ifndef DTTOIF
#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
#endif

struct lu_dirpage {
        __u64 ldp_hash_start;
        __u64 ldp_hash_end;
        __u32 ldp_flags;
        __u32 ldp_pad0;
        struct lu_dirent ldp_entries[0];
};

enum lu_dirpage_flags {
        /**
         * dirpage contains no entry.
         */
        LDF_EMPTY = 1 << 0,
        /**
         * last entry's lde_hash equals ldp_hash_end.
         */
        LDF_COLLIDE = 1 << 1
};

static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
        if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
                return NULL;
        else
                return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
        struct lu_dirent *next;

        if (le16_to_cpu(ent->lde_reclen) != 0)
                next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
        else
                next = NULL;

        return next;
}

static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
{
        size_t size;

        if (attr & LUDA_TYPE) {
                const size_t align = sizeof(struct luda_type) - 1;

                size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
                size += sizeof(struct luda_type);
        } else {
                size = sizeof(struct lu_dirent) + namelen;
        }

        return (size + 7) & ~7;
}
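
/*
 * Usage sketch (illustrative only): walking the entries of one readdir
 * page received from the MDS; dp and use_entry() are placeholders.
 * lu_dirent_next() returns NULL once lde_reclen is 0:
 *
 *      struct lu_dirent *ent;
 *
 *      for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent))
 *              use_entry(ent);
 */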

#define MDS_DIR_END_OFF 0xfffffffffffffffeULL

/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
 * It's different than PAGE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page", and without this there isn't any way to know where the
 * lu_dirpage header is if client and server PAGE_SIZE differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
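
/*
 * Worked example (illustrative only): a client running with 64 KiB pages
 * (PAGE_SHIFT == 16) holds LU_PAGE_COUNT == 1 << (16 - 12) == 16 lu_dirpage
 * blocks per kernel page, while a 4 KiB client holds exactly one; an offset
 * is truncated to an lu_dirpage boundary with (offset & LU_PAGE_MASK).
 */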

/** @} lu_dir */

struct lustre_handle {
        __u64 cookie;
};

#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
{
        return lh->cookie != 0ull;
}

static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
                                       const struct lustre_handle *lh2)
{
        return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
                                      const struct lustre_handle *src)
{
        tgt->cookie = src->cookie;
}

/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT 0x1
#define MSGHDR_CKSUM_INCOMPAT18 0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
        __u32 lm_bufcount;
        __u32 lm_secflvr;
        __u32 lm_magic;
        __u32 lm_repsize;
        __u32 lm_cksum;
        __u32 lm_flags;
        __u32 lm_padding_2;
        __u32 lm_padding_3;
        __u32 lm_buflens[0];
};

/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS 4

struct ptlrpc_body_v3 {
        struct lustre_handle pb_handle;
        __u32 pb_type;
        __u32 pb_version;
        __u32 pb_opc;
        __u32 pb_status;
        __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
        __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
        __u16 pb_padding0;
        __u32 pb_padding1;
        __u64 pb_last_committed;
        __u64 pb_transno;
        __u32 pb_flags;
        __u32 pb_op_flags;
        __u32 pb_conn_cnt;
        __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
        __u32 pb_service_time; /* for rep, actual service time */
        __u32 pb_limit;
        __u64 pb_slv;
        /* VBR: pre-versions */
        __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
        __u64 pb_mbits; /**< match bits for bulk request */
        /* padding for future needs */
        __u64 pb_padding64_0;
        __u64 pb_padding64_1;
        __u64 pb_padding64_2;
        char pb_jobid[LUSTRE_JOBID_SIZE];
};

#define ptlrpc_body ptlrpc_body_v3

struct ptlrpc_body_v2 {
        struct lustre_handle pb_handle;
        __u32 pb_type;
        __u32 pb_version;
        __u32 pb_opc;
        __u32 pb_status;
        __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
        __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
        __u16 pb_padding0;
        __u32 pb_padding1;
        __u64 pb_last_committed;
        __u64 pb_transno;
        __u32 pb_flags;
        __u32 pb_op_flags;
        __u32 pb_conn_cnt;
        __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
        __u32 pb_service_time; /* for rep, actual service time, also used for
                                * net_latency of req
                                */
        __u32 pb_limit;
        __u64 pb_slv;
        /* VBR: pre-versions */
        __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
        __u64 pb_mbits; /**< unused in V2 */
        /* padding for future needs */
        __u64 pb_padding64_0;
        __u64 pb_padding64_1;
        __u64 pb_padding64_2;
};

/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF 0

/* normal request/reply message record offset */
#define REQ_REC_OFF 1
#define REPLY_REC_OFF 1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF 1 /* lockreq offset */
#define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF 2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF 3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF 1 /* lockrep offset */
#define DLM_REPLY_REC_OFF 2 /* reply record offset */

/** only used in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF 31

/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK 0xffff0000
#define MSG_OP_FLAG_SHIFT 16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK 0x0000ffff
#define MSG_LAST_REPLAY 0x0001
#define MSG_RESENT 0x0002
#define MSG_REPLAY 0x0004
/* #define MSG_AT_SUPPORT 0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code there also isn't a need for using
 * these bits. Defer usage until at least 1.10 to avoid potential conflict.
 */
#define MSG_DELAY_REPLAY 0x0010
#define MSG_VERSION_REPLAY 0x0020
#define MSG_REQ_REPLAY_DONE 0x0040
#define MSG_LOCK_REPLAY_DONE 0x0080

/*
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
 */

#define MSG_CONNECT_RECOVERING 0x00000001
#define MSG_CONNECT_RECONNECT 0x00000002
#define MSG_CONNECT_REPLAYABLE 0x00000004
/*#define MSG_CONNECT_PEER 0x8 */
#define MSG_CONNECT_LIBCLIENT 0x00000010
#define MSG_CONNECT_INITIAL 0x00000020
#define MSG_CONNECT_ASYNC 0x00000040
#define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */

/* Connect flags */
#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL 0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated.
                                    *We do not support JOIN FILE
                                    *anymore, reserve this flag
                                    *just for preventing such bit
                                    *to be reused.
                                    */
#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /* Remote client, never used
                                           * in production. Removed in
                                           * 2.9. Keep this flag to
                                           * avoid reuse.
                                           */
#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /* Remote client by force,
                                                 * never used in production.
                                                 * Removed in 2.9. Keep this
                                                 * flag to avoid reuse
                                                 */
#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL 0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits
                                               * directory hash
                                               */
#define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
                                                  * RPC error properly
                                                  */
#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL /* extra grant params used
                                                   * for finer space
                                                   * reservation
                                                   */
#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
                                                   * policy and 2.x server
                                                   */
#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL /* lightweight connection */
#define OBD_CONNECT_SHORTIO 0x2000000000000ULL /* short io */
#define OBD_CONNECT_PINGLESS 0x4000000000000ULL /* pings not required */
#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL /* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL /* create stripe
                                                     * disposition
                                                     */
#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
                                                     * name in request
                                                     */
#define OBD_CONNECT_LFSCK 0x40000000000000ULL /* support online LFSCK */
#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL /* close file in unlink */
#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple
                                                       * modify RPCs in
                                                       * parallel
                                                       */
#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL /* striped DNE dir */
#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */
#define OBD_CONNECT_LOCK_AHEAD 0x1000000000000000ULL /* lock ahead */
/** bulk matchbits is sent within ptlrpc_body */
#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL
#define OBD_CONNECT_OBDOPACK 0x4000000000000000ULL /* compact OUT obdo */
#define OBD_CONNECT_FLAGS2 0x8000000000000000ULL /* second flags word */

/* XXX README XXX:
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch. Please clear any such
 * changes with senior engineers before starting to use a new flag. Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use.
 */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection. It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644.
 */
#define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS

#define OCD_HAS_FLAG(ocd, flg) \
        (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
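
/*
 * Usage sketch (illustrative only): given the obd_connect_data negotiated
 * at connect time, a peer feature is tested by flag name, e.g.
 *
 *      if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *              enable_grant_shrink();
 *
 * which expands to a test of OBD_CONNECT_GRANT_SHRINK in
 * ocd->ocd_connect_flags; enable_grant_shrink() is a placeholder.
 */
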
/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
                                 OBD_CONNECT_FULL20)

/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here.
 */
struct obd_connect_data {
        __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
        __u32 ocd_version;       /* lustre release version number */
        __u32 ocd_grant;         /* initial cache grant amount (bytes) */
        __u32 ocd_index;         /* LOV index to connect to */
        __u32 ocd_brw_size;      /* Maximum BRW size in bytes */
        __u64 ocd_ibits_known;   /* inode bits this client understands */
        __u8 ocd_blocksize;      /* log2 of the backend filesystem blocksize */
        __u8 ocd_inodespace;     /* log2 of the per-inode space consumption */
        __u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
        __u32 ocd_unused;        /* also fix lustre_swab_connect */
        __u64 ocd_transno;       /* first transno from client to be replayed */
        __u32 ocd_group;         /* MDS group on OST */
        __u32 ocd_cksum_types;   /* supported checksum algorithms */
        __u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
        __u32 ocd_instance;      /* instance # of this target */
        __u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
        /* Fields after ocd_maxbytes are only accessible by the receiver
         * if the corresponding flag in ocd_connect_flags is set. Accessing
         * any field after ocd_maxbytes on the receiver without a valid flag
         * may result in out-of-bound memory access and kernel oops.
         */
        __u16 ocd_maxmodrpcs;    /* Maximum modify RPCs in parallel */
        __u16 padding0;          /* added 2.1.0. also fix lustre_swab_connect */
        __u32 padding1;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 ocd_connect_flags2;
        __u64 padding3;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 padding4;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 padding5;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 padding6;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 padding7;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 padding8;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 padding9;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 paddingA;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 paddingB;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 paddingC;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 paddingD;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 paddingE;          /* added 2.1.0. also fix lustre_swab_connect */
        __u64 paddingF;          /* added 2.1.0. also fix lustre_swab_connect */
};

/* XXX README XXX:
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch. Please clear any such changes
 * with senior engineers before starting to use a new field. Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
 * reserve the flag for future use.
 */

/*
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
enum cksum_type {
        OBD_CKSUM_CRC32 = 0x00000001,
        OBD_CKSUM_ADLER = 0x00000002,
        OBD_CKSUM_CRC32C = 0x00000004,
};

/*
 * OST requests: OBDO & OBD request records
 */

/* opcodes */
enum ost_cmd {
        OST_REPLY = 0, /* reply ? */
        OST_GETATTR = 1,
        OST_SETATTR = 2,
        OST_READ = 3,
        OST_WRITE = 4,
        OST_CREATE = 5,
        OST_DESTROY = 6,
        OST_GET_INFO = 7,
        OST_CONNECT = 8,
        OST_DISCONNECT = 9,
        OST_PUNCH = 10,
        OST_OPEN = 11,
        OST_CLOSE = 12,
        OST_STATFS = 13,
        OST_SYNC = 16,
        OST_SET_INFO = 17,
        OST_QUOTACHECK = 18, /* not used since 2.4 */
        OST_QUOTACTL = 19,
        OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
        OST_LAST_OPC
};
#define OST_FIRST_OPC OST_REPLY

enum obdo_flags {
        OBD_FL_INLINEDATA = 0x00000001,
        OBD_FL_OBDMDEXISTS = 0x00000002,
        OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
        OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
        OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id */
        OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
        OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
        OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
        OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
        OBD_FL_CREATE_CROW = 0x00000400, /* object should be created on write */
        OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
        OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
        OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
        OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
        OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
        OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
        OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
        OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
                                   * XXX: obsoleted - reserved for old
                                   * clients prior to 2.2
                                   */
        OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
        OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
        OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
        OBD_FL_SHORT_IO = 0x00400000, /* short io request */

        /* Note that while these checksum values are currently separate bits,
         * in 2.x we can actually allow all values from 1-31 if we wanted.
         */
        OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
                           OBD_FL_CKSUM_CRC32C,

        /* mask for local-only flag, which won't be sent over network */
        OBD_FL_LOCAL_MASK = 0xF0000000,
};

/*
 * All LOV EA magics should have the same postfix: if some new version of
 * Lustre introduces a new LOV EA magic, then when down-grading to an old
 * Lustre, even though the old version does not recognize the new magic,
 * it can still distinguish the corrupted cases by checking the magic's
 * postfix.
 */
#define LOV_MAGIC_MAGIC 0x0BD0
#define LOV_MAGIC_MASK 0xFFFF

#define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC)
/* reserved for specifying OSTs */
#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC LOV_MAGIC_V1

/*
 * magic for fully defined striping
 * the idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
 * lov_mds_md_v[13]). at the moment the magics are used in wire protocol,
 * we can't just change it w/o long way preparation, but we still need a
 * mechanism to allow LOD to differentiate hint versus ready striping.
 * so, at the moment we do a trick: MDT knows what to expect from request
 * depending on the case (replay uses ready striping, non-replay req uses
 * hints), so MDT replaces magic with appropriate one and now LOD can
 * easily understand what's inside -bzzz
 */
#define LOV_MAGIC_V1_DEF 0x0CD10BD0
#define LOV_MAGIC_V3_DEF 0x0CD30BD0

#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)

#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 { /* per-stripe data structure (little-endian) */
        struct ost_id l_ost_oi; /* OST object ID */
        __u32 l_ost_gen; /* generation of this l_ost_idx */
        __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
};

#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
        __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
        __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
        struct ost_id lmm_oi; /* LOV object ID */
        __u32 lmm_stripe_size; /* size of stripe in bytes */
        /* lmm_stripe_count used to be __u32 */
        __u16 lmm_stripe_count; /* num stripes in use for this object */
        __u16 lmm_layout_gen; /* layout generation number */
        struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

/**
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *      ........
 *      __u64 lmm_object_id;
 *      __u64 lmm_object_seq;
 *      ......
 * }
 * to identify the LOV(MDT) object, and lmm_object_seq will
 * be normal_fid, which makes it hard to combine these conversions
 * into ostid_to FID, so we will do the lmm_oi/fid conversion separately.
 *
 * We can tell the lmm_oi this way:
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *      lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from LMA; besides, this multiple-case check might
 * make swab more complicated. So we will keep using id/seq for lmm_oi.
 */

static inline void fid_to_lmm_oi(const struct lu_fid *fid,
                                 struct ost_id *oi)
{
        oi->oi.oi_id = fid_oid(fid);
        oi->oi.oi_seq = fid_seq(fid);
}

static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
{
        oi->oi.oi_seq = seq;
}

static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
{
        oi->oi.oi_id = oid;
}

static inline __u64 lmm_oi_id(const struct ost_id *oi)
{
        return oi->oi.oi_id;
}

static inline __u64 lmm_oi_seq(const struct ost_id *oi)
{
        return oi->oi.oi_seq;
}

static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
                                    const struct ost_id *src_oi)
{
        dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
        dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
}

static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
                                    const struct ost_id *src_oi)
{
        dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
        dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}

#define MAX_MD_SIZE \
        (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE \
        (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))

#define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
#define XATTR_USER_PREFIX "user."
#define XATTR_TRUSTED_PREFIX "trusted."
#define XATTR_SECURITY_PREFIX "security."
#define XATTR_LUSTRE_PREFIX "lustre."

#define XATTR_NAME_LOV "trusted.lov"
#define XATTR_NAME_LMA "trusted.lma"
#define XATTR_NAME_LMV "trusted.lmv"
#define XATTR_NAME_DEFAULT_LMV "trusted.dmv"
#define XATTR_NAME_LINK "trusted.link"
#define XATTR_NAME_FID "trusted.fid"
#define XATTR_NAME_VERSION "trusted.version"
#define XATTR_NAME_SOM "trusted.som"
#define XATTR_NAME_HSM "trusted.hsm"
#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"

struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
        __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
        __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
        struct ost_id lmm_oi; /* LOV object ID */
        __u32 lmm_stripe_size; /* size of stripe in bytes */
        /* lmm_stripe_count used to be __u32 */
        __u16 lmm_stripe_count; /* num stripes in use for this object */
        __u16 lmm_layout_gen; /* layout generation number */
        char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
        struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
{
        if (lmm_magic == LOV_MAGIC_V3)
                return sizeof(struct lov_mds_md_v3) +
                       stripes * sizeof(struct lov_ost_data_v1);
        else
                return sizeof(struct lov_mds_md_v1) +
                       stripes * sizeof(struct lov_ost_data_v1);
}

static inline __u32
lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
{
        switch (lmm_magic) {
        case LOV_MAGIC_V1: {
                struct lov_mds_md_v1 lmm;

                if (buf_size < sizeof(lmm))
                        return 0;

                return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
        }
        case LOV_MAGIC_V3: {
                struct lov_mds_md_v3 lmm;

                if (buf_size < sizeof(lmm))
                        return 0;

                return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
        }
        default:
                return 0;
        }
}
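
/*
 * Worked example (illustrative only): a 3-stripe V1 layout needs
 * lov_mds_md_size(3, LOV_MAGIC_V1) == sizeof(struct lov_mds_md_v1) +
 * 3 * sizeof(struct lov_ost_data_v1) bytes of EA space; conversely, a
 * buffer of exactly that size passed to
 * lov_mds_md_max_stripe_count(size, LOV_MAGIC_V1) yields 3.
 */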

#define OBD_MD_FLID (0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
#define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
#define OBD_MD_FLSIZE (0x00000010ULL) /* size */
#define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
#define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
#define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
#define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
#define OBD_MD_FLUID (0x00000200ULL) /* user ID */
#define OBD_MD_FLGID (0x00000400ULL) /* group ID */
#define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
#define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
#define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
/*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
#define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
#define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
#define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
#define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */
#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
                                       /* ->mds if epoch opens or closes */
#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */

#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives */
#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */

#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
/* OBD_MD_FLRMTPERM (0x0000010000000000ULL) remote perm, obsolete */
#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
                                                      * under lock; for xattr
                                                      * requests means the
                                                      * client holds the lock
                                                      */
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */

/* OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
/* OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
/* OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */

#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
                                                            * executed
                                                            */

#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */

#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
                          OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
                          OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
                          OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
                          OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)

#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)

/* don't forget obdo_fid which is way down at the bottom so it can
 * come after the definition of llog_cookie
1599 */
d7e09d03
PT
1600
1601enum hss_valid {
1602 HSS_SETMASK = 0x01,
1603 HSS_CLEARMASK = 0x02,
1604 HSS_ARCHIVE_ID = 0x04,
1605};
1606
1607struct hsm_state_set {
1608 __u32 hss_valid;
1609 __u32 hss_archive_id;
1610 __u64 hss_setmask;
1611 __u64 hss_clearmask;
1612};
1613
d7e09d03
PT
1614/* ost_body.data values for OST_BRW */
1615
a1e616b0
OD
1616#define OBD_BRW_READ 0x01
1617#define OBD_BRW_WRITE 0x02
1618#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
1619#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
d7e09d03 1620 * transfer and is not accounted in
a1e616b0
OD
1621 * the grant.
1622 */
1623#define OBD_BRW_CHECK 0x10
d7e09d03 1624#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
a1e616b0
OD
1625#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
1626#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
1627#define OBD_BRW_NOQUOTA 0x100
1628#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
1629#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
d7e09d03
PT
1630#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1631#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1632#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
ad479287
PS
1633#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
1634 * that the client is running low on
 1635 * space for unstable pages and asks
 1636 * it to sync quickly
1637 */
d7e09d03 1638
00c0a6ae 1639#define OBD_OBJECT_EOF LUSTRE_EOF
d7e09d03
PT
1640
1641#define OST_MIN_PRECREATE 32
1642#define OST_MAX_PRECREATE 20000
1643
1644struct obd_ioobj {
1645 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1646 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1647 * now (PTLRPC_BULK_OPS_COUNT - 1) in
a1e616b0
OD
1648 * high 16 bits in 2.4 and later
1649 */
d7e09d03
PT
1650 __u32 ioo_bufcnt; /* number of niobufs for this object */
1651};
1652
5965de81
JX
1653/*
1654 * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
1655 * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
1656 * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
1657 */
d7e09d03 1658#define IOOBJ_MAX_BRW_BITS 16
d7e09d03
PT
1659#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1660#define ioobj_max_brw_set(ioo, num) \
1661do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
1662
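/*
 * Illustrative sketch only, not part of the original header: the helpers
 * above store "count - 1" in the high 16 bits of ioo_max_brw, so the pair
 * round-trips for 1 <= count <= 65536.  The helper name is hypothetical.
 */
static inline bool obd_ioobj_max_brw_example(struct obd_ioobj *ioo, __u32 count)
{
	ioobj_max_brw_set(ioo, count);		/* encode count - 1 in high bits */
	return ioobj_max_brw_get(ioo) == count;	/* decode back to count */
}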
d7e09d03
PT
 1663/* size is a multiple of 8 bytes => entries can be packed in an array */
1664struct niobuf_remote {
638814f6
JH
1665 __u64 rnb_offset;
1666 __u32 rnb_len;
1667 __u32 rnb_flags;
d7e09d03
PT
1668};
1669
d7e09d03
PT
1670/* lock value block communicated between the filter and llite */
1671
1672/* OST_LVB_ERR_INIT is needed because the return code in rc is
a1e616b0
OD
1673 * negative, i.e. because ((MASK + rc) & MASK) != MASK.
1674 */
d7e09d03
PT
1675#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1676#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1677#define OST_LVB_IS_ERR(blocks) \
1678 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1679#define OST_LVB_SET_ERR(blocks, rc) \
1680 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1681#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
1682
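/*
 * Illustrative sketch only, not part of the original header: a negative
 * return code can be carried in the 64-bit blocks field and recovered on
 * the other side.  The helper name is hypothetical.
 */
static inline int ost_lvb_err_example(int rc)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, rc);		/* blocks = ERR_INIT + rc */
	if (!OST_LVB_IS_ERR(blocks))
		return 0;
	return OST_LVB_GET_ERR(blocks);		/* recovers the original rc */
}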
1683struct ost_lvb_v1 {
1684 __u64 lvb_size;
21aef7d9
OD
1685 __s64 lvb_mtime;
1686 __s64 lvb_atime;
1687 __s64 lvb_ctime;
d7e09d03
PT
1688 __u64 lvb_blocks;
1689};
1690
d7e09d03
PT
1691struct ost_lvb {
1692 __u64 lvb_size;
21aef7d9
OD
1693 __s64 lvb_mtime;
1694 __s64 lvb_atime;
1695 __s64 lvb_ctime;
d7e09d03
PT
1696 __u64 lvb_blocks;
1697 __u32 lvb_mtime_ns;
1698 __u32 lvb_atime_ns;
1699 __u32 lvb_ctime_ns;
1700 __u32 lvb_padding;
1701};
1702
d7e09d03
PT
1703/*
1704 * lquota data structures
1705 */
1706
03440c4e 1707/* The lquota_id structure is a union of all the possible identifier types that
d7e09d03
PT
 1708 * can be used with quota; this includes:
1709 * - 64-bit user ID
1710 * - 64-bit group ID
a1e616b0
OD
1711 * - a FID which can be used for per-directory quota in the future
1712 */
d7e09d03
PT
1713union lquota_id {
1714 struct lu_fid qid_fid; /* FID for per-directory quota */
1715 __u64 qid_uid; /* user identifier */
1716 __u64 qid_gid; /* group identifier */
1717};
1718
1719/* quotactl management */
1720struct obd_quotactl {
1721 __u32 qc_cmd;
1722 __u32 qc_type; /* see Q_* flag below */
1723 __u32 qc_id;
1724 __u32 qc_stat;
1725 struct obd_dqinfo qc_dqinfo;
1726 struct obd_dqblk qc_dqblk;
1727};
1728
d7e09d03
PT
1729#define Q_COPY(out, in, member) (out)->member = (in)->member
1730
1731#define QCTL_COPY(out, in) \
1732do { \
1733 Q_COPY(out, in, qc_cmd); \
1734 Q_COPY(out, in, qc_type); \
1735 Q_COPY(out, in, qc_id); \
1736 Q_COPY(out, in, qc_stat); \
1737 Q_COPY(out, in, qc_dqinfo); \
1738 Q_COPY(out, in, qc_dqblk); \
1739} while (0)
1740
d7e09d03
PT
1741/* Data structures associated with the quota locks */
1742
1743/* Glimpse descriptor used for the index & per-ID quota locks */
1744struct ldlm_gl_lquota_desc {
1745 union lquota_id gl_id; /* quota ID subject to the glimpse */
1746 __u64 gl_flags; /* see LQUOTA_FL* below */
1747 __u64 gl_ver; /* new index version */
1748 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1749 __u64 gl_softlimit; /* new softlimit */
1750 __u64 gl_time;
1751 __u64 gl_pad2;
1752};
c9f6bb96 1753
d7e09d03
PT
1754/* quota glimpse flags */
1755#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1756
1757/* LVB used with quota (global and per-ID) locks */
1758struct lquota_lvb {
1759 __u64 lvb_flags; /* see LQUOTA_FL* above */
1760 __u64 lvb_id_may_rel; /* space that might be released later */
1761 __u64 lvb_id_rel; /* space released by the slave for this ID */
1762 __u64 lvb_id_qunit; /* current qunit value */
1763 __u64 lvb_pad1;
1764};
1765
d7e09d03 1766/* op codes */
115b4f9c 1767enum quota_cmd {
d7e09d03
PT
1768 QUOTA_DQACQ = 601,
1769 QUOTA_DQREL = 602,
1770 QUOTA_LAST_OPC
115b4f9c 1771};
d7e09d03
PT
1772#define QUOTA_FIRST_OPC QUOTA_DQACQ
1773
1774/*
1775 * MDS REQ RECORDS
1776 */
1777
1778/* opcodes */
303e4002 1779enum mds_cmd {
d7e09d03
PT
1780 MDS_GETATTR = 33,
1781 MDS_GETATTR_NAME = 34,
1782 MDS_CLOSE = 35,
1783 MDS_REINT = 36,
1784 MDS_READPAGE = 37,
1785 MDS_CONNECT = 38,
1786 MDS_DISCONNECT = 39,
1787 MDS_GETSTATUS = 40,
1788 MDS_STATFS = 41,
58c78cd2
JH
1789 MDS_PIN = 42, /* obsolete, never used in a release */
1790 MDS_UNPIN = 43, /* obsolete, never used in a release */
d7e09d03 1791 MDS_SYNC = 44,
a823acf5 1792 MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */
d7e09d03 1793 MDS_SET_INFO = 46,
e57721e7 1794 MDS_QUOTACHECK = 47, /* not used since 2.4 */
d7e09d03
PT
1795 MDS_QUOTACTL = 48,
1796 MDS_GETXATTR = 49,
1797 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
1798 MDS_WRITEPAGE = 51,
58c78cd2 1799 MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */
d7e09d03
PT
1800 MDS_GET_INFO = 53,
1801 MDS_HSM_STATE_GET = 54,
1802 MDS_HSM_STATE_SET = 55,
1803 MDS_HSM_ACTION = 56,
1804 MDS_HSM_PROGRESS = 57,
1805 MDS_HSM_REQUEST = 58,
1806 MDS_HSM_CT_REGISTER = 59,
1807 MDS_HSM_CT_UNREGISTER = 60,
1808 MDS_SWAP_LAYOUTS = 61,
1809 MDS_LAST_OPC
303e4002 1810};
d7e09d03
PT
1811
1812#define MDS_FIRST_OPC MDS_GETATTR
1813
d7e09d03
PT
1814/*
1815 * Do not exceed 63
1816 */
1817
07e2eb39 1818enum mdt_reint_cmd {
d7e09d03
PT
1819 REINT_SETATTR = 1,
1820 REINT_CREATE = 2,
1821 REINT_LINK = 3,
1822 REINT_UNLINK = 4,
1823 REINT_RENAME = 5,
1824 REINT_OPEN = 6,
1825 REINT_SETXATTR = 7,
1826 REINT_RMENTRY = 8,
79496845 1827 REINT_MIGRATE = 9,
d7e09d03 1828 REINT_MAX
07e2eb39 1829};
d7e09d03 1830
d7e09d03
PT
1831/* the disposition of the intent outlines what was executed */
1832#define DISP_IT_EXECD 0x00000001
1833#define DISP_LOOKUP_EXECD 0x00000002
1834#define DISP_LOOKUP_NEG 0x00000004
1835#define DISP_LOOKUP_POS 0x00000008
1836#define DISP_OPEN_CREATE 0x00000010
1837#define DISP_OPEN_OPEN 0x00000020
f236f69b 1838#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
d7e09d03
PT
1839#define DISP_ENQ_OPEN_REF 0x00800000
1840#define DISP_ENQ_CREATE_REF 0x01000000
1841#define DISP_OPEN_LOCK 0x02000000
d3a8a4e2 1842#define DISP_OPEN_LEASE 0x04000000
63d42578 1843#define DISP_OPEN_STRIPE 0x08000000
864d6a25 1844#define DISP_OPEN_DENY 0x10000000
d7e09d03
PT
1845
1846/* INODE LOCK PARTS */
fe4c58af 1847#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
1848 * was used to protect permission (mode,
a1e616b0
OD
1849 * owner, group etc) before 2.4.
1850 */
fe4c58af 1851#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
1852#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
1853#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
1854
 1855/* The PERM bit was added in 2.4 and is used to protect permission (mode,
 1856 * owner, group, ACL, etc.), so as to separate the permission from the LOOKUP
 1857 * lock, because for remote directories (in DNE) these locks will be granted
 1858 * by different MDTs (different LDLM namespaces).
1859 *
1860 * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together.
1861 * For Remote directory, the master MDT, where the remote directory is, will
1862 * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
a1e616b0
OD
1863 * will grant LOOKUP_LOCK.
1864 */
fe4c58af 1865#define MDS_INODELOCK_PERM 0x000010
1866#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
d7e09d03 1867
7fc1f831 1868#define MDS_INODELOCK_MAXSHIFT 5
d7e09d03 1869/* This FULL lock is useful to take on unlink sort of operations */
cd94f231 1870#define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)
d7e09d03 1871
d7e09d03
PT
1872/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
1873 * but was moved into name[1] along with the OID to avoid consuming the
a1e616b0
OD
1874 * name[2,3] fields that need to be used for the quota id (also a FID).
1875 */
d7e09d03
PT
1876enum {
1877 LUSTRE_RES_ID_SEQ_OFF = 0,
1878 LUSTRE_RES_ID_VER_OID_OFF = 1,
1879 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
1880 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
1881 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
1882 LUSTRE_RES_ID_HSH_OFF = 3
1883};
1884
1885#define MDS_STATUS_CONN 1
1886#define MDS_STATUS_LOV 2
1887
d7e09d03 1888/* these should be identical to their EXT4_*_FL counterparts, they are
a1e616b0
OD
1889 * redefined here only to avoid dragging in fs/ext4/ext4.h
1890 */
d7e09d03
PT
1891#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
1892#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
1893#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
c90cfdba 1894#define LUSTRE_NODUMP_FL 0x00000040 /* do not dump file */
d7e09d03 1895#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
c90cfdba 1896#define LUSTRE_INDEX_FL 0x00001000 /* hash-indexed directory */
d7e09d03 1897#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
c90cfdba
AD
 1898#define LUSTRE_TOPDIR_FL 0x00020000 /* Top of directory hierarchies */
1899#define LUSTRE_DIRECTIO_FL 0x00100000 /* Use direct i/o */
1900#define LUSTRE_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
d7e09d03
PT
1901
1902/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
1903 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
1904 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
1905 * the S_* flags are kernel-internal values that change between kernel
1906 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
a1e616b0
OD
1907 * See b=16526 for a full history.
1908 */
d7e09d03
PT
1909static inline int ll_ext_to_inode_flags(int flags)
1910{
1911 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
1912 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
1913 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
d7e09d03 1914 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
d7e09d03
PT
1915 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
1916}
1917
1918static inline int ll_inode_to_ext_flags(int iflags)
1919{
1920 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
1921 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
1922 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
d7e09d03 1923 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
d7e09d03
PT
1924 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
1925}
1926
5ea17d6c
JL
1927/* 64 possible states */
1928enum md_transient_state {
1929 MS_RESTORE = (1 << 0), /* restore is running */
1930};
1931
d7e09d03 1932struct mdt_body {
2e1b5b8b
JH
1933 struct lu_fid mbo_fid1;
1934 struct lu_fid mbo_fid2;
1935 struct lustre_handle mbo_handle;
1936 __u64 mbo_valid;
1937 __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */
1938 __s64 mbo_mtime;
1939 __s64 mbo_atime;
1940 __s64 mbo_ctime;
1941 __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */
1942 __u64 mbo_ioepoch;
1943 __u64 mbo_t_state; /* transient file state defined in
1944 * enum md_transient_state
1945 * was "ino" until 2.4.0
1946 */
1947 __u32 mbo_fsuid;
1948 __u32 mbo_fsgid;
1949 __u32 mbo_capability;
1950 __u32 mbo_mode;
1951 __u32 mbo_uid;
1952 __u32 mbo_gid;
c90cfdba 1953 __u32 mbo_flags; /* LUSTRE_*_FL file attributes */
2e1b5b8b
JH
1954 __u32 mbo_rdev;
1955 __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
1956 __u32 mbo_unused2; /* was "generation" until 2.4.0 */
1957 __u32 mbo_suppgid;
1958 __u32 mbo_eadatasize;
1959 __u32 mbo_aclsize;
1960 __u32 mbo_max_mdsize;
0a4bea92 1961 __u32 mbo_unused3; /* was max_cookiesize until 2.8 */
2e1b5b8b
JH
1962 __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
1963 __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
1964 __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
1965 __u64 mbo_padding_6;
1966 __u64 mbo_padding_7;
1967 __u64 mbo_padding_8;
1968 __u64 mbo_padding_9;
1969 __u64 mbo_padding_10;
d7e09d03
PT
1970}; /* 216 */
1971
d7e09d03 1972struct mdt_ioepoch {
a823acf5
JH
1973 struct lustre_handle mio_handle;
1974 __u64 mio_unused1; /* was ioepoch */
1975 __u32 mio_unused2; /* was flags */
1976 __u32 mio_padding;
d7e09d03
PT
1977};
1978
d7e09d03
PT
1979/* permissions for md_perm.mp_perm */
1980enum {
1981 CFS_SETUID_PERM = 0x01,
1982 CFS_SETGID_PERM = 0x02,
1983 CFS_SETGRP_PERM = 0x04,
d7e09d03
PT
1984};
1985
d7e09d03
PT
1986struct mdt_rec_setattr {
1987 __u32 sa_opcode;
1988 __u32 sa_cap;
1989 __u32 sa_fsuid;
1990 __u32 sa_fsuid_h;
1991 __u32 sa_fsgid;
1992 __u32 sa_fsgid_h;
1993 __u32 sa_suppgid;
1994 __u32 sa_suppgid_h;
1995 __u32 sa_padding_1;
1996 __u32 sa_padding_1_h;
1997 struct lu_fid sa_fid;
1998 __u64 sa_valid;
1999 __u32 sa_uid;
2000 __u32 sa_gid;
2001 __u64 sa_size;
2002 __u64 sa_blocks;
21aef7d9
OD
2003 __s64 sa_mtime;
2004 __s64 sa_atime;
2005 __s64 sa_ctime;
d7e09d03
PT
2006 __u32 sa_attr_flags;
2007 __u32 sa_mode;
2008 __u32 sa_bias; /* some operation flags */
2009 __u32 sa_padding_3;
2010 __u32 sa_padding_4;
2011 __u32 sa_padding_5;
2012};
2013
d7e09d03
PT
2014/*
2015 * Attribute flags used in mdt_rec_setattr::sa_valid.
2016 * The kernel's #defines for ATTR_* should not be used over the network
2017 * since the client and MDS may run different kernels (see bug 13828)
2018 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2019 */
a1e616b0
OD
2020#define MDS_ATTR_MODE 0x1ULL /* = 1 */
2021#define MDS_ATTR_UID 0x2ULL /* = 2 */
2022#define MDS_ATTR_GID 0x4ULL /* = 4 */
2023#define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2024#define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2025#define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2026#define MDS_ATTR_CTIME 0x40ULL /* = 64 */
d7e09d03
PT
2027#define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2028#define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2029#define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */
2030#define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2031#define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2032#define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2033#define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
a1e616b0
OD
2034#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path,
2035 * ie O_TRUNC
2036 */
d7e09d03
PT
2037#define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2038
d7e09d03
PT
2039#define MDS_FMODE_CLOSED 00000000
2040#define MDS_FMODE_EXEC 00000004
a823acf5
JH
2041/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */
2042/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */
2043/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */
d7e09d03
PT
2044
2045#define MDS_OPEN_CREATED 00000010
2046#define MDS_OPEN_CROSS 00000020
2047
2048#define MDS_OPEN_CREAT 00000100
2049#define MDS_OPEN_EXCL 00000200
2050#define MDS_OPEN_TRUNC 00001000
2051#define MDS_OPEN_APPEND 00002000
2052#define MDS_OPEN_SYNC 00010000
2053#define MDS_OPEN_DIRECTORY 00200000
2054
2055#define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2056#define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2057#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
 2058#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
 2059 * JOIN FILE is no longer
 2060 * supported; this flag is
 2061 * reserved only to prevent the
a1e616b0
OD
 2062 * bit from being reused.
 2063 */
d7e09d03 2064
a1e616b0 2065#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
d7e09d03
PT
2066#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2067#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
2068#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2069#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
2070 * hsm restore) */
2071#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
 2072 * unlinked */
d3a8a4e2
JX
2073#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
2074 * delegation, succeed if it's not
2075 * being opened with conflict mode.
2076 */
48d23e61 2077#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
d7e09d03 2078
c1b66fcc
LS
2079#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \
2080 MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \
2081 MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \
2082 MDS_OPEN_RELEASE)
2083
48d23e61 2084enum mds_op_bias {
d7e09d03
PT
2085 MDS_CHECK_SPLIT = 1 << 0,
2086 MDS_CROSS_REF = 1 << 1,
2087 MDS_VTX_BYPASS = 1 << 2,
2088 MDS_PERM_BYPASS = 1 << 3,
a823acf5 2089/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */
d7e09d03
PT
2090 MDS_QUOTA_IGNORE = 1 << 5,
2091 MDS_CLOSE_CLEANUP = 1 << 6,
2092 MDS_KEEP_ORPHAN = 1 << 7,
2093 MDS_RECOV_OPEN = 1 << 8,
2094 MDS_DATA_MODIFIED = 1 << 9,
2095 MDS_CREATE_VOLATILE = 1 << 10,
2096 MDS_OWNEROVERRIDE = 1 << 11,
48d23e61 2097 MDS_HSM_RELEASE = 1 << 12,
79496845 2098 MDS_RENAME_MIGRATE = BIT(13),
0ffaa9c8 2099 MDS_CLOSE_LAYOUT_SWAP = BIT(14),
d7e09d03
PT
2100};
2101
2102/* instance of mdt_reint_rec */
2103struct mdt_rec_create {
2104 __u32 cr_opcode;
2105 __u32 cr_cap;
2106 __u32 cr_fsuid;
2107 __u32 cr_fsuid_h;
2108 __u32 cr_fsgid;
2109 __u32 cr_fsgid_h;
2110 __u32 cr_suppgid1;
2111 __u32 cr_suppgid1_h;
2112 __u32 cr_suppgid2;
2113 __u32 cr_suppgid2_h;
2114 struct lu_fid cr_fid1;
2115 struct lu_fid cr_fid2;
2116 struct lustre_handle cr_old_handle; /* handle in case of open replay */
21aef7d9 2117 __s64 cr_time;
d7e09d03
PT
2118 __u64 cr_rdev;
2119 __u64 cr_ioepoch;
2120 __u64 cr_padding_1; /* rr_blocks */
2121 __u32 cr_mode;
2122 __u32 cr_bias;
2123 /* use of helpers set/get_mrc_cr_flags() is needed to access
2124 * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
a1e616b0
OD
2125 * extend cr_flags size without breaking 1.8 compat
2126 */
d7e09d03
PT
2127 __u32 cr_flags_l; /* for use with open, low 32 bits */
2128 __u32 cr_flags_h; /* for use with open, high 32 bits */
2129 __u32 cr_umask; /* umask for create */
2130 __u32 cr_padding_4; /* rr_padding_4 */
2131};
2132
2133static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2134{
2135 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
2136 mrc->cr_flags_h = (__u32)(flags >> 32);
2137}
2138
2139static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2140{
2141 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2142}
2143
2144/* instance of mdt_reint_rec */
2145struct mdt_rec_link {
2146 __u32 lk_opcode;
2147 __u32 lk_cap;
2148 __u32 lk_fsuid;
2149 __u32 lk_fsuid_h;
2150 __u32 lk_fsgid;
2151 __u32 lk_fsgid_h;
2152 __u32 lk_suppgid1;
2153 __u32 lk_suppgid1_h;
2154 __u32 lk_suppgid2;
2155 __u32 lk_suppgid2_h;
2156 struct lu_fid lk_fid1;
2157 struct lu_fid lk_fid2;
21aef7d9 2158 __s64 lk_time;
d7e09d03
PT
2159 __u64 lk_padding_1; /* rr_atime */
2160 __u64 lk_padding_2; /* rr_ctime */
2161 __u64 lk_padding_3; /* rr_size */
2162 __u64 lk_padding_4; /* rr_blocks */
2163 __u32 lk_bias;
2164 __u32 lk_padding_5; /* rr_mode */
2165 __u32 lk_padding_6; /* rr_flags */
2166 __u32 lk_padding_7; /* rr_padding_2 */
2167 __u32 lk_padding_8; /* rr_padding_3 */
2168 __u32 lk_padding_9; /* rr_padding_4 */
2169};
2170
2171/* instance of mdt_reint_rec */
2172struct mdt_rec_unlink {
2173 __u32 ul_opcode;
2174 __u32 ul_cap;
2175 __u32 ul_fsuid;
2176 __u32 ul_fsuid_h;
2177 __u32 ul_fsgid;
2178 __u32 ul_fsgid_h;
2179 __u32 ul_suppgid1;
2180 __u32 ul_suppgid1_h;
2181 __u32 ul_suppgid2;
2182 __u32 ul_suppgid2_h;
2183 struct lu_fid ul_fid1;
2184 struct lu_fid ul_fid2;
21aef7d9 2185 __s64 ul_time;
d7e09d03
PT
2186 __u64 ul_padding_2; /* rr_atime */
2187 __u64 ul_padding_3; /* rr_ctime */
2188 __u64 ul_padding_4; /* rr_size */
2189 __u64 ul_padding_5; /* rr_blocks */
2190 __u32 ul_bias;
2191 __u32 ul_mode;
2192 __u32 ul_padding_6; /* rr_flags */
2193 __u32 ul_padding_7; /* rr_padding_2 */
2194 __u32 ul_padding_8; /* rr_padding_3 */
2195 __u32 ul_padding_9; /* rr_padding_4 */
2196};
2197
2198/* instance of mdt_reint_rec */
2199struct mdt_rec_rename {
2200 __u32 rn_opcode;
2201 __u32 rn_cap;
2202 __u32 rn_fsuid;
2203 __u32 rn_fsuid_h;
2204 __u32 rn_fsgid;
2205 __u32 rn_fsgid_h;
2206 __u32 rn_suppgid1;
2207 __u32 rn_suppgid1_h;
2208 __u32 rn_suppgid2;
2209 __u32 rn_suppgid2_h;
2210 struct lu_fid rn_fid1;
2211 struct lu_fid rn_fid2;
21aef7d9 2212 __s64 rn_time;
d7e09d03
PT
2213 __u64 rn_padding_1; /* rr_atime */
2214 __u64 rn_padding_2; /* rr_ctime */
2215 __u64 rn_padding_3; /* rr_size */
2216 __u64 rn_padding_4; /* rr_blocks */
2217 __u32 rn_bias; /* some operation flags */
2218 __u32 rn_mode; /* cross-ref rename has mode */
2219 __u32 rn_padding_5; /* rr_flags */
2220 __u32 rn_padding_6; /* rr_padding_2 */
2221 __u32 rn_padding_7; /* rr_padding_3 */
2222 __u32 rn_padding_8; /* rr_padding_4 */
2223};
2224
2225/* instance of mdt_reint_rec */
2226struct mdt_rec_setxattr {
2227 __u32 sx_opcode;
2228 __u32 sx_cap;
2229 __u32 sx_fsuid;
2230 __u32 sx_fsuid_h;
2231 __u32 sx_fsgid;
2232 __u32 sx_fsgid_h;
2233 __u32 sx_suppgid1;
2234 __u32 sx_suppgid1_h;
2235 __u32 sx_suppgid2;
2236 __u32 sx_suppgid2_h;
2237 struct lu_fid sx_fid;
2238 __u64 sx_padding_1; /* These three are rr_fid2 */
2239 __u32 sx_padding_2;
2240 __u32 sx_padding_3;
2241 __u64 sx_valid;
21aef7d9 2242 __s64 sx_time;
d7e09d03
PT
2243 __u64 sx_padding_5; /* rr_ctime */
2244 __u64 sx_padding_6; /* rr_size */
2245 __u64 sx_padding_7; /* rr_blocks */
2246 __u32 sx_size;
2247 __u32 sx_flags;
2248 __u32 sx_padding_8; /* rr_flags */
2249 __u32 sx_padding_9; /* rr_padding_2 */
2250 __u32 sx_padding_10; /* rr_padding_3 */
2251 __u32 sx_padding_11; /* rr_padding_4 */
2252};
2253
2254/*
2255 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2256 * Do NOT change the size of various members, otherwise the value
2257 * will be broken in lustre_swab_mdt_rec_reint().
2258 *
f16192ed 2259 * If you add new members in other mdt_reint_xxx structures and need to use the
d7e09d03
PT
2260 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2261 */
2262struct mdt_rec_reint {
2263 __u32 rr_opcode;
2264 __u32 rr_cap;
2265 __u32 rr_fsuid;
2266 __u32 rr_fsuid_h;
2267 __u32 rr_fsgid;
2268 __u32 rr_fsgid_h;
2269 __u32 rr_suppgid1;
2270 __u32 rr_suppgid1_h;
2271 __u32 rr_suppgid2;
2272 __u32 rr_suppgid2_h;
2273 struct lu_fid rr_fid1;
2274 struct lu_fid rr_fid2;
21aef7d9
OD
2275 __s64 rr_mtime;
2276 __s64 rr_atime;
2277 __s64 rr_ctime;
d7e09d03
PT
2278 __u64 rr_size;
2279 __u64 rr_blocks;
2280 __u32 rr_bias;
2281 __u32 rr_mode;
2282 __u32 rr_flags;
2283 __u32 rr_flags_h;
2284 __u32 rr_umask;
2285 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2286};
2287
b78c2b9b 2288/* lmv structures */
d7e09d03
PT
2289struct lmv_desc {
2290 __u32 ld_tgt_count; /* how many MDS's */
2291 __u32 ld_active_tgt_count; /* how many active */
2292 __u32 ld_default_stripe_count; /* how many objects are used */
2de35386 2293 __u32 ld_pattern; /* default hash pattern */
d7e09d03
PT
2294 __u64 ld_default_hash_size;
2295 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2296 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
 2297 __u32 ld_qos_maxage; /* in seconds */
2298 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2299 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2300 struct obd_uuid ld_uuid;
2301};
2302
8f18c8a4 2303/* LMV layout EA, stored in both the master and the slave objects */
2304struct lmv_mds_md_v1 {
2305 __u32 lmv_magic;
2306 __u32 lmv_stripe_count;
2307 __u32 lmv_master_mdt_index; /* On master object, it is master
2308 * MDT index, on slave object, it
2309 * is stripe index of the slave obj
2310 */
 2311 __u32 lmv_hash_type; /* dir stripe policy, i.e. which
 2312 * hash function is to be used.
 2313 * Note: only the lower 16 bits are
 2314 * used for now; the higher 16 bits
 2315 * mark the object status, for
 2316 * example migrating or dead.
2317 */
2318 __u32 lmv_layout_version; /* Used for directory restriping */
c8deb3cb
FY
2319 __u32 lmv_padding1;
2320 __u64 lmv_padding2;
2321 __u64 lmv_padding3;
aaf06e29 2322 char lmv_pool_name[LOV_MAXPOOLNAME + 1];/* pool name */
8f18c8a4 2323 struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */
2324};
2de35386 2325
8f18c8a4 2326#define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */
2327#define LMV_MAGIC LMV_MAGIC_V1
2328
2329/* #define LMV_USER_MAGIC 0x0CD30CD0 */
2330#define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */
2331
2332/*
 2333 * Right now only the lower part (bits 0-15) of lmv_hash_type is used,
 2334 * and the higher part holds flags that indicate the status of the object,
 2335 * for example that it is being migrated.  The hash function might be
 2336 * interpreted differently depending on these flags.
2337 */
8f18c8a4 2338#define LMV_HASH_TYPE_MASK 0x0000ffff
2339
2340#define LMV_HASH_FLAG_MIGRATION 0x80000000
2341#define LMV_HASH_FLAG_DEAD 0x40000000
2342
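/*
 * Illustrative sketch only, not part of the original header: the low 16
 * bits of lmv_hash_type select the hash function, the high bits carry
 * status flags such as LMV_HASH_FLAG_MIGRATION.  Hypothetical helper.
 */
static inline bool lmv_example_is_migrating(__u32 lmv_hash_type)
{
	/* (lmv_hash_type & LMV_HASH_TYPE_MASK) would select the hash function */
	return (lmv_hash_type & LMV_HASH_FLAG_MIGRATION) != 0;
}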
2de35386 2343/**
2344 * The FNV-1a hash algorithm is as follows:
2345 * hash = FNV_offset_basis
2346 * for each octet_of_data to be hashed
2347 * hash = hash XOR octet_of_data
2348 * hash = hash × FNV_prime
2349 * return hash
2350 * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
2351 *
2352 * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
2353 * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
2354 **/
2355#define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL
2356#define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL
2357static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
2358{
2359 __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
2360 const unsigned char *p = buf;
2361 size_t i;
2362
2363 for (i = 0; i < size; i++) {
2364 hash ^= p[i];
2365 hash *= LUSTRE_FNV_1A_64_PRIME;
2366 }
2367
2368 return hash;
2369}
2370
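/*
 * Illustrative sketch only, not part of the original header: striped
 * directories hash an entry name with lustre_hash_fnv_1a_64() and reduce
 * it by the stripe count to choose a stripe (real kernel code would use
 * do_div() for the 64-bit modulo).  Hypothetical helper.
 */
static inline __u32 lmv_example_name_to_stripe(const char *name, size_t namelen,
					       __u32 stripe_count)
{
	return lustre_hash_fnv_1a_64(name, namelen) % stripe_count;
}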
8e9dfe8a 2371union lmv_mds_md {
2372 __u32 lmv_magic;
2373 struct lmv_mds_md_v1 lmv_md_v1;
2374 struct lmv_user_md lmv_user_md;
2375};
2376
2377static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
2378{
2379 ssize_t len = -EINVAL;
2380
2381 switch (lmm_magic) {
8f18c8a4 2382 case LMV_MAGIC_V1: {
8e9dfe8a 2383 struct lmv_mds_md_v1 *lmm1;
2384
2385 len = sizeof(*lmm1);
2386 len += stripe_count * sizeof(lmm1->lmv_stripe_fids[0]);
2387 break; }
2388 default:
2389 break;
2390 }
2391 return len;
2392}
2393
2394static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
2395{
2396 switch (le32_to_cpu(lmm->lmv_magic)) {
2397 case LMV_MAGIC_V1:
2398 return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
2399 case LMV_USER_MAGIC:
2400 return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
2401 default:
2402 return -EINVAL;
2403 }
2404}
2405
2406static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
2407 unsigned int stripe_count)
2408{
2409 int rc = 0;
2410
2411 switch (le32_to_cpu(lmm->lmv_magic)) {
2412 case LMV_MAGIC_V1:
2413 lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
2414 break;
2415 case LMV_USER_MAGIC:
2416 lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count);
2417 break;
2418 default:
2419 rc = -EINVAL;
2420 break;
2421 }
2422 return rc;
2423}
2424
d7e09d03 2425enum fld_rpc_opc {
b78c2b9b 2426 FLD_QUERY = 900,
2427 FLD_READ = 901,
d7e09d03 2428 FLD_LAST_OPC,
b78c2b9b 2429 FLD_FIRST_OPC = FLD_QUERY
d7e09d03
PT
2430};
2431
2432enum seq_rpc_opc {
2433 SEQ_QUERY = 700,
2434 SEQ_LAST_OPC,
2435 SEQ_FIRST_OPC = SEQ_QUERY
2436};
2437
2438enum seq_op {
2439 SEQ_ALLOC_SUPER = 0,
2440 SEQ_ALLOC_META = 1
2441};
2442
b78c2b9b 2443enum fld_op {
2444 FLD_CREATE = 0,
2445 FLD_DELETE = 1,
2446 FLD_LOOKUP = 2,
2447};
2448
d7e09d03
PT
2449/*
2450 * LOV data structures
2451 */
2452
2453#define LOV_MAX_UUID_BUFFER_SIZE 8192
2454/* The size of the buffer the lov/mdc reserves for the
2455 * array of UUIDs returned by the MDS. With the current
a1e616b0
OD
2456 * protocol, this will limit the max number of OSTs per LOV
2457 */
d7e09d03
PT
2458
2459#define LOV_DESC_MAGIC 0xB0CCDE5C
081b7265
JH
2460#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2461#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
d7e09d03
PT
2462
2463/* LOV settings descriptor (should only contain static info) */
2464struct lov_desc {
2465 __u32 ld_tgt_count; /* how many OBD's */
a1e616b0
OD
2466 __u32 ld_active_tgt_count; /* how many active */
2467 __u32 ld_default_stripe_count; /* how many objects are used */
2468 __u32 ld_pattern; /* default PATTERN_RAID0 */
2469 __u64 ld_default_stripe_size; /* in bytes */
2470 __u64 ld_default_stripe_offset; /* in bytes */
d7e09d03 2471 __u32 ld_padding_0; /* unused */
a1e616b0 2472 __u32 ld_qos_maxage; /* in seconds */
d7e09d03
PT
2473 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2474 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2475 struct obd_uuid ld_uuid;
2476};
2477
2478#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2479
d7e09d03
PT
2480/*
2481 * LDLM requests:
2482 */
2483/* opcodes -- MUST be distinct from OST/MDS opcodes */
980b745f 2484enum ldlm_cmd {
d7e09d03
PT
2485 LDLM_ENQUEUE = 101,
2486 LDLM_CONVERT = 102,
2487 LDLM_CANCEL = 103,
2488 LDLM_BL_CALLBACK = 104,
2489 LDLM_CP_CALLBACK = 105,
2490 LDLM_GL_CALLBACK = 106,
2491 LDLM_SET_INFO = 107,
2492 LDLM_LAST_OPC
980b745f 2493};
d7e09d03
PT
2494#define LDLM_FIRST_OPC LDLM_ENQUEUE
2495
2496#define RES_NAME_SIZE 4
2497struct ldlm_res_id {
2498 __u64 name[RES_NAME_SIZE];
2499};
2500
55f5a824 2501#define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
ce74f92d
AD
2502#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
2503 (res)->lr_name.name[2], (res)->lr_name.name[3]
2504
d8f183b3
JH
2505static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
2506 const struct ldlm_res_id *res1)
d7e09d03
PT
2507{
2508 return !memcmp(res0, res1, sizeof(*res0));
2509}
2510
2511/* lock types */
52ee0d20 2512enum ldlm_mode {
d7e09d03
PT
2513 LCK_MINMODE = 0,
2514 LCK_EX = 1,
2515 LCK_PW = 2,
2516 LCK_PR = 4,
2517 LCK_CW = 8,
2518 LCK_CR = 16,
2519 LCK_NL = 32,
2520 LCK_GROUP = 64,
2521 LCK_COS = 128,
2522 LCK_MAXMODE
52ee0d20 2523};
d7e09d03
PT
2524
2525#define LCK_MODE_NUM 8
2526
52ee0d20 2527enum ldlm_type {
d7e09d03
PT
2528 LDLM_PLAIN = 10,
2529 LDLM_EXTENT = 11,
2530 LDLM_FLOCK = 12,
2531 LDLM_IBITS = 13,
2532 LDLM_MAX_TYPE
52ee0d20 2533};
d7e09d03
PT
2534
2535#define LDLM_MIN_TYPE LDLM_PLAIN
2536
2537struct ldlm_extent {
2538 __u64 start;
2539 __u64 end;
2540 __u64 gid;
2541};
2542
ac8f0a5c
JH
2543static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
2544 const struct ldlm_extent *ex2)
d7e09d03
PT
2545{
2546 return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2547}
2548
2549/* check if @ex1 contains @ex2 */
ac8f0a5c
JH
2550static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
2551 const struct ldlm_extent *ex2)
d7e09d03
PT
2552{
2553 return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
2554}
2555
2556struct ldlm_inodebits {
2557 __u64 bits;
2558};
2559
2560struct ldlm_flock_wire {
2561 __u64 lfw_start;
2562 __u64 lfw_end;
2563 __u64 lfw_owner;
2564 __u32 lfw_padding;
2565 __u32 lfw_pid;
2566};
2567
2568/* it's important that the fields of the ldlm_extent structure match
2569 * the first fields of the ldlm_flock structure because there is only
2570 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2571 * this ever changes we will need to swab the union differently based
a1e616b0
OD
2572 * on the resource type.
2573 */
d7e09d03 2574
394a9726 2575union ldlm_wire_policy_data {
d7e09d03
PT
2576 struct ldlm_extent l_extent;
2577 struct ldlm_flock_wire l_flock;
2578 struct ldlm_inodebits l_inodebits;
394a9726 2579};
d7e09d03 2580
d7e09d03
PT
2581union ldlm_gl_desc {
2582 struct ldlm_gl_lquota_desc lquota_desc;
2583};
2584
43eb3b33
BE
2585enum ldlm_intent_flags {
2586 IT_OPEN = BIT(0),
2587 IT_CREAT = BIT(1),
2588 IT_OPEN_CREAT = BIT(1) | BIT(0),
2589 IT_READDIR = BIT(2),
2590 IT_GETATTR = BIT(3),
2591 IT_LOOKUP = BIT(4),
2592 IT_UNLINK = BIT(5),
2593 IT_TRUNC = BIT(6),
2594 IT_GETXATTR = BIT(7),
2595 IT_EXEC = BIT(8),
2596 IT_PIN = BIT(9),
2597 IT_LAYOUT = BIT(10),
2598 IT_QUOTA_DQACQ = BIT(11),
2599 IT_QUOTA_CONN = BIT(12),
2600 IT_SETXATTR = BIT(13),
2601};
2602
d7e09d03
PT
2603struct ldlm_intent {
2604 __u64 opc;
2605};
2606
d7e09d03 2607struct ldlm_resource_desc {
52ee0d20 2608 enum ldlm_type lr_type;
d7e09d03
PT
2609 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
2610 struct ldlm_res_id lr_name;
2611};
2612
d7e09d03
PT
2613struct ldlm_lock_desc {
2614 struct ldlm_resource_desc l_resource;
52ee0d20
OD
2615 enum ldlm_mode l_req_mode;
2616 enum ldlm_mode l_granted_mode;
394a9726 2617 union ldlm_wire_policy_data l_policy_data;
d7e09d03
PT
2618};
2619
d7e09d03
PT
2620#define LDLM_LOCKREQ_HANDLES 2
2621#define LDLM_ENQUEUE_CANCEL_OFF 1
2622
2623struct ldlm_request {
2624 __u32 lock_flags;
2625 __u32 lock_count;
2626 struct ldlm_lock_desc lock_desc;
2627 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2628};
2629
d7e09d03 2630/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
a1e616b0
OD
2631 * Otherwise, 2 are available.
2632 */
1d8cb70c 2633#define ldlm_request_bufsize(count, type) \
d7e09d03
PT
2634({ \
2635 int _avail = LDLM_LOCKREQ_HANDLES; \
2636 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2637 sizeof(struct ldlm_request) + \
2638 (count > _avail ? count - _avail : 0) * \
2639 sizeof(struct lustre_handle); \
2640})
2641
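/*
 * Illustrative sketch only, not part of the original header: an
 * LDLM_ENQUEUE carrying three cancel handles has one embedded slot taken
 * by the enqueued lock itself, so two extra handles are appended.
 * Hypothetical helper.
 */
static inline size_t ldlm_request_bufsize_example(void)
{
	return ldlm_request_bufsize(3, LDLM_ENQUEUE);
	/* == sizeof(struct ldlm_request) + 2 * sizeof(struct lustre_handle) */
}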
2642struct ldlm_reply {
2643 __u32 lock_flags;
2644 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2645 struct ldlm_lock_desc lock_desc;
2646 struct lustre_handle lock_handle;
2647 __u64 lock_policy_res1;
2648 __u64 lock_policy_res2;
2649};
2650
d7e09d03
PT
2651#define ldlm_flags_to_wire(flags) ((__u32)(flags))
2652#define ldlm_flags_from_wire(flags) ((__u64)(flags))
2653
2654/*
2655 * Opcodes for mountconf (mgs and mgc)
2656 */
66423bff 2657enum mgs_cmd {
d7e09d03
PT
2658 MGS_CONNECT = 250,
2659 MGS_DISCONNECT,
2660 MGS_EXCEPTION, /* node died, etc. */
2661 MGS_TARGET_REG, /* whenever target starts up */
2662 MGS_TARGET_DEL,
2663 MGS_SET_INFO,
2664 MGS_CONFIG_READ,
2665 MGS_LAST_OPC
66423bff 2666};
d7e09d03
PT
2667#define MGS_FIRST_OPC MGS_CONNECT
2668
2669#define MGS_PARAM_MAXLEN 1024
2670#define KEY_SET_INFO "set_info"
2671
2672struct mgs_send_param {
2673 char mgs_param[MGS_PARAM_MAXLEN];
2674};
2675
2676/* We pass this info to the MGS so it can write config logs */
2677#define MTI_NAME_MAXLEN 64
2678#define MTI_PARAM_MAXLEN 4096
2679#define MTI_NIDS_MAX 32
2680struct mgs_target_info {
2681 __u32 mti_lustre_ver;
2682 __u32 mti_stripe_index;
2683 __u32 mti_config_ver;
2684 __u32 mti_flags;
2685 __u32 mti_nid_count;
2686 __u32 mti_instance; /* Running instance of target */
2687 char mti_fsname[MTI_NAME_MAXLEN];
2688 char mti_svname[MTI_NAME_MAXLEN];
2689 char mti_uuid[sizeof(struct obd_uuid)];
2690 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
2691 char mti_params[MTI_PARAM_MAXLEN];
2692};
8150a97f 2693
d7e09d03
PT
2694struct mgs_nidtbl_entry {
2695 __u64 mne_version; /* table version of this entry */
2696 __u32 mne_instance; /* target instance # */
2697 __u32 mne_index; /* target index */
2698 __u32 mne_length; /* length of this entry - by bytes */
2699 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
2700 __u8 mne_nid_type; /* type of nid(mbz). for ipv6. */
2701 __u8 mne_nid_size; /* size of each NID, by bytes */
2702 __u8 mne_nid_count; /* # of NIDs in buffer */
2703 union {
2704 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
2705 } u;
2706};
8150a97f 2707
d7e09d03
PT
2708struct mgs_config_body {
2709 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2710 __u64 mcb_offset; /* next index of config log to request */
2711 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2712 __u8 mcb_reserved;
2713 __u8 mcb_bits; /* bits unit size of config log */
2714 __u32 mcb_units; /* # of units for bulk transfer */
2715};
8150a97f 2716
d7e09d03
PT
2717struct mgs_config_res {
2718 __u64 mcr_offset; /* index of last config log */
2719 __u64 mcr_size; /* size of the log */
2720};
8150a97f 2721
d7e09d03
PT
2722/* Config marker flags (in config log) */
2723#define CM_START 0x01
2724#define CM_END 0x02
2725#define CM_SKIP 0x04
2726#define CM_UPGRADE146 0x08
2727#define CM_EXCLUDE 0x10
2728#define CM_START_SKIP (CM_START | CM_SKIP)
2729
2730struct cfg_marker {
2731 __u32 cm_step; /* aka config version */
2732 __u32 cm_flags;
2733 __u32 cm_vers; /* lustre release version number */
2734 __u32 cm_padding; /* 64 bit align */
21aef7d9
OD
 2735 __s64 cm_createtime; /* when this record was first created */
 2736 __s64 cm_canceltime; /* when this record is no longer valid */
d7e09d03
PT
2737 char cm_tgtname[MTI_NAME_MAXLEN];
2738 char cm_comment[MTI_NAME_MAXLEN];
2739};
2740
d7e09d03
PT
2741/*
2742 * Opcodes for multiple servers.
2743 */
2744
deadbe9a 2745enum obd_cmd {
d7e09d03
PT
2746 OBD_PING = 400,
2747 OBD_LOG_CANCEL,
e57721e7 2748 OBD_QC_CALLBACK, /* not used since 2.4 */
d7e09d03
PT
2749 OBD_IDX_READ,
2750 OBD_LAST_OPC
deadbe9a 2751};
d7e09d03
PT
2752#define OBD_FIRST_OPC OBD_PING
2753
b14b3ba5
MP
2754/**
2755 * llog contexts indices.
2756 *
2757 * There is compatibility problem with indexes below, they are not
2758 * continuous and must keep their numbers for compatibility needs.
2759 * See LU-5218 for details.
2760 */
2761enum llog_ctxt_id {
2762 LLOG_CONFIG_ORIG_CTXT = 0,
2763 LLOG_CONFIG_REPL_CTXT = 1,
2764 LLOG_MDS_OST_ORIG_CTXT = 2,
2765 LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
2766 LLOG_SIZE_ORIG_CTXT = 4,
2767 LLOG_SIZE_REPL_CTXT = 5,
2768 LLOG_TEST_ORIG_CTXT = 8,
2769 LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
2770 LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
2771 LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
2772 /* for multiple changelog consumers */
2773 LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
2774 LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
2775 LLOG_MAX_CTXTS
2776};
d7e09d03
PT
2777
2778/** Identifier for a single log object */
2779struct llog_logid {
2780 struct ost_id lgl_oi;
2781 __u32 lgl_ogen;
a6e19b4d 2782} __packed;
d7e09d03
PT
2783
2784/** Records written to the CATALOGS list */
2785#define CATLIST "CATALOGS"
2786struct llog_catid {
2787 struct llog_logid lci_logid;
2788 __u32 lci_padding1;
2789 __u32 lci_padding2;
2790 __u32 lci_padding3;
a6e19b4d 2791} __packed;
d7e09d03
PT
2792
2793/* Log data record types - there is no specific reason that these need to
2794 * be related to the RPC opcodes, but no reason not to (may be handy later?)
2795 */
2796#define LLOG_OP_MAGIC 0x10600000
2797#define LLOG_OP_MASK 0xfff00000
2798
bfea4767 2799enum llog_op_type {
d7e09d03
PT
2800 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
2801 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
2802 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
2803 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2804 REINT_UNLINK, /* obsolete after 2.5.0 */
2805 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2806 REINT_UNLINK,
2807 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2808 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2809 REINT_SETATTR,
2810 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
2811 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2812 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
2813 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
2814 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
2815 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
99a92265 2816 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
d7e09d03
PT
2817 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
2818 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
bfea4767 2819};
d7e09d03
PT
2820
2821#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2822 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
2823
2824/** Log record header - stored in little endian order.
2825 * Each record must start with this struct, end with a llog_rec_tail,
2826 * and be a multiple of 256 bits in size.
2827 */
2828struct llog_rec_hdr {
2829 __u32 lrh_len;
2830 __u32 lrh_index;
2831 __u32 lrh_type;
2832 __u32 lrh_id;
2833};
2834
2835struct llog_rec_tail {
2836 __u32 lrt_len;
2837 __u32 lrt_index;
2838};
2839
2840/* Where data follow just after header */
2841#define REC_DATA(ptr) \
2842 ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
2843
2844#define REC_DATA_LEN(rec) \
2845 (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
2846 sizeof(struct llog_rec_tail))
2847
2848struct llog_logid_rec {
2849 struct llog_rec_hdr lid_hdr;
2850 struct llog_logid lid_id;
2851 __u32 lid_padding1;
2852 __u64 lid_padding2;
2853 __u64 lid_padding3;
2854 struct llog_rec_tail lid_tail;
a6e19b4d 2855} __packed;
d7e09d03
PT
2856
2857struct llog_unlink_rec {
2858 struct llog_rec_hdr lur_hdr;
21aef7d9
OD
2859 __u64 lur_oid;
2860 __u32 lur_oseq;
2861 __u32 lur_count;
d7e09d03 2862 struct llog_rec_tail lur_tail;
a6e19b4d 2863} __packed;
d7e09d03
PT
2864
2865struct llog_unlink64_rec {
2866 struct llog_rec_hdr lur_hdr;
2867 struct lu_fid lur_fid;
21aef7d9 2868 __u32 lur_count; /* to destroy the lost precreated */
d7e09d03
PT
2869 __u32 lur_padding1;
2870 __u64 lur_padding2;
2871 __u64 lur_padding3;
2872 struct llog_rec_tail lur_tail;
a6e19b4d 2873} __packed;
d7e09d03
PT
2874
2875struct llog_setattr64_rec {
2876 struct llog_rec_hdr lsr_hdr;
2877 struct ost_id lsr_oi;
2878 __u32 lsr_uid;
2879 __u32 lsr_uid_h;
2880 __u32 lsr_gid;
2881 __u32 lsr_gid_h;
f7aafa7c 2882 __u64 lsr_valid;
d7e09d03 2883 struct llog_rec_tail lsr_tail;
a6e19b4d 2884} __packed;
d7e09d03
PT
2885
2886struct llog_size_change_rec {
2887 struct llog_rec_hdr lsc_hdr;
2888 struct ll_fid lsc_fid;
2889 __u32 lsc_ioepoch;
2890 __u32 lsc_padding1;
2891 __u64 lsc_padding2;
2892 __u64 lsc_padding3;
2893 struct llog_rec_tail lsc_tail;
a6e19b4d 2894} __packed;
d7e09d03 2895
d7e09d03
PT
2896/* changelog llog name, needed by client replicators */
2897#define CHANGELOG_CATALOG "changelog_catalog"
2898
2899struct changelog_setinfo {
2900 __u64 cs_recno;
2901 __u32 cs_id;
a6e19b4d 2902} __packed;
d7e09d03
PT
2903
2904/** changelog record */
2905struct llog_changelog_rec {
c9fe1f7f
HD
2906 struct llog_rec_hdr cr_hdr;
2907 struct changelog_rec cr; /**< Variable length field */
 2908 struct llog_rec_tail cr_do_not_use; /**< for sizeof only */
a6e19b4d 2909} __packed;
d7e09d03 2910
d7e09d03
PT
2911struct llog_changelog_user_rec {
2912 struct llog_rec_hdr cur_hdr;
2913 __u32 cur_id;
2914 __u32 cur_padding;
2915 __u64 cur_endrec;
2916 struct llog_rec_tail cur_tail;
a6e19b4d 2917} __packed;
d7e09d03 2918
99a92265 2919enum agent_req_status {
2920 ARS_WAITING,
2921 ARS_STARTED,
2922 ARS_FAILED,
2923 ARS_CANCELED,
2924 ARS_SUCCEED,
2925};
2926
ac8f0a5c 2927static inline const char *agent_req_status2name(const enum agent_req_status ars)
99a92265 2928{
2929 switch (ars) {
2930 case ARS_WAITING:
2931 return "WAITING";
2932 case ARS_STARTED:
2933 return "STARTED";
2934 case ARS_FAILED:
2935 return "FAILED";
2936 case ARS_CANCELED:
2937 return "CANCELED";
2938 case ARS_SUCCEED:
2939 return "SUCCEED";
2940 default:
2941 return "UNKNOWN";
2942 }
2943}
2944
2945static inline bool agent_req_in_final_state(enum agent_req_status ars)
2946{
2947 return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
2948 (ars == ARS_CANCELED));
2949}
2950
2951struct llog_agent_req_rec {
2952 struct llog_rec_hdr arr_hdr; /**< record header */
2953 __u32 arr_status; /**< status of the request */
2954 /* must match enum
a1e616b0
OD
2955 * agent_req_status
2956 */
99a92265 2957 __u32 arr_archive_id; /**< backend archive number */
2958 __u64 arr_flags; /**< req flags */
a1e616b0 2959 __u64 arr_compound_id;/**< compound cookie */
99a92265 2960 __u64 arr_req_create; /**< req. creation time */
2961 __u64 arr_req_change; /**< req. status change time */
2962 struct hsm_action_item arr_hai; /**< req. to the agent */
a1e616b0 2963 struct llog_rec_tail arr_tail; /**< record tail, for sizeof only */
a6e19b4d 2964} __packed;
99a92265 2965
d7e09d03
PT
2966/* Old llog gen for compatibility */
2967struct llog_gen {
2968 __u64 mnt_cnt;
2969 __u64 conn_cnt;
a6e19b4d 2970} __packed;
d7e09d03
PT
2971
2972struct llog_gen_rec {
2973 struct llog_rec_hdr lgr_hdr;
2974 struct llog_gen lgr_gen;
2975 __u64 padding1;
2976 __u64 padding2;
2977 __u64 padding3;
2978 struct llog_rec_tail lgr_tail;
2979};
2980
d7e09d03
PT
2981/* flags for the logs */
2982enum llog_flag {
2983 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
2984 LLOG_F_IS_CAT = 0x2,
2985 LLOG_F_IS_PLAIN = 0x4,
c9fe1f7f 2986 LLOG_F_EXT_JOBID = BIT(3),
b5367061 2987 LLOG_F_IS_FIXSIZE = BIT(4),
c9fe1f7f 2988
b5367061 2989 /*
2990 * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
2991 * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
2992 * because the catlog record is usually fixed size, but its plain
2993 * log record can be variable
2994 */
c9fe1f7f 2995 LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
d7e09d03
PT
2996};
2997
11c647ca
AD
2998/* On-disk header structure of each log object, stored in little endian order */
2999#define LLOG_MIN_CHUNK_SIZE 8192
3000#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) +
3001 * sizeof(llh_tail) - sizeof(llh_bitmap)
3002 */
3003#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
3004#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3005
3006/* flags for the logs */
d7e09d03
PT
3007struct llog_log_hdr {
3008 struct llog_rec_hdr llh_hdr;
21aef7d9 3009 __s64 llh_timestamp;
d7e09d03
PT
3010 __u32 llh_count;
3011 __u32 llh_bitmap_offset;
3012 __u32 llh_size;
3013 __u32 llh_flags;
3014 __u32 llh_cat_idx;
3015 /* for a catalog the first plain slot is next to it */
3016 struct obd_uuid llh_tgtuuid;
cd94f231 3017 __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
11c647ca
AD
3018 /* These fields must always be at the end of the llog_log_hdr.
3019 * Note: llh_bitmap size is variable because llog chunk size could be
3020 * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192
3021 * bytes, and the real size is stored in llh_hdr.lrh_len, which means
 3022 * llh_tail should only be accessed via LLOG_HDR_TAIL().
 3023 * But this structure is also used by the client/server llog interface
 3024 * (see llog_client.c), so it is kept in its original form to avoid
 3025 * compatibility issues.
3026 */
cd94f231 3027 __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
d7e09d03 3028 struct llog_rec_tail llh_tail;
a6e19b4d 3029} __packed;
d7e09d03 3030
11c647ca
AD
3031#undef LLOG_HEADER_SIZE
3032#undef LLOG_BITMAP_BYTES
3033
3034#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3035 llh->llh_bitmap_offset - \
3036 sizeof(llh->llh_tail)) * 8)
3037#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \
3038 (llh)->llh_bitmap_offset)
3039#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \
3040 llh->llh_hdr.lrh_len - \
3041 sizeof(llh->llh_tail)))
d7e09d03 3042
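/*
 * Illustrative sketch only, not part of the original header: because the
 * chunk size may exceed LLOG_MIN_CHUNK_SIZE, the bitmap and tail must be
 * reached through the accessors above rather than the fixed-size fields.
 * Hypothetical helper.
 */
static inline __u32 llog_example_bitmap_bits(struct llog_log_hdr *llh)
{
	/* number of record slots this header's bitmap can describe */
	return LLOG_HDR_BITMAP_SIZE(llh);
}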
a1e616b0
OD
3043/** log cookies are used to reference a specific log file and a record
3044 * therein
3045 */
d7e09d03
PT
3046struct llog_cookie {
3047 struct llog_logid lgc_lgl;
3048 __u32 lgc_subsys;
3049 __u32 lgc_index;
3050 __u32 lgc_padding;
a6e19b4d 3051} __packed;
d7e09d03
PT
3052
3053/** llog protocol */
3054enum llogd_rpc_ops {
3055 LLOG_ORIGIN_HANDLE_CREATE = 501,
3056 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3057 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3058 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3059 LLOG_ORIGIN_HANDLE_CLOSE = 505,
a1e616b0 3060 LLOG_ORIGIN_CONNECT = 506,
d7e09d03
PT
3061 LLOG_CATINFO = 507, /* deprecated */
3062 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3063 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3064 LLOG_LAST_OPC,
3065 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3066};
3067
3068struct llogd_body {
3069 struct llog_logid lgd_logid;
3070 __u32 lgd_ctxt_idx;
3071 __u32 lgd_llh_flags;
3072 __u32 lgd_index;
3073 __u32 lgd_saved_index;
3074 __u32 lgd_len;
3075 __u64 lgd_cur_offset;
a6e19b4d 3076} __packed;
d7e09d03
PT
3077
3078struct llogd_conn_body {
3079 struct llog_gen lgdc_gen;
3080 struct llog_logid lgdc_logid;
3081 __u32 lgdc_ctxt_idx;
a6e19b4d 3082} __packed;
d7e09d03
PT
3083
3084/* Note: 64-bit types are 64-bit aligned in structure */
3085struct obdo {
21aef7d9
OD
3086 __u64 o_valid; /* hot fields in this obdo */
3087 struct ost_id o_oi;
3088 __u64 o_parent_seq;
3089 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3090 __s64 o_mtime;
3091 __s64 o_atime;
3092 __s64 o_ctime;
3093 __u64 o_blocks; /* brw: cli sent cached bytes */
3094 __u64 o_grant;
d7e09d03
PT
3095
3096 /* 32-bit fields start here: keep an even number of them via padding */
21aef7d9
OD
3097 __u32 o_blksize; /* optimal IO blocksize */
3098 __u32 o_mode; /* brw: cli sent cache remain */
3099 __u32 o_uid;
3100 __u32 o_gid;
3101 __u32 o_flags;
3102 __u32 o_nlink; /* brw: checksum */
3103 __u32 o_parent_oid;
3104 __u32 o_misc; /* brw: o_dropped */
d7e09d03
PT
3105
3106 __u64 o_ioepoch; /* epoch in ost writes */
3107 __u32 o_stripe_idx; /* holds stripe idx */
3108 __u32 o_parent_ver;
a1e616b0
OD
3109 struct lustre_handle o_handle; /* brw: lock handle to prolong locks
3110 */
0a4bea92
JH
3111 struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS,
3112 * obsolete in 2.8, reused in OSP
a1e616b0 3113 */
d7e09d03
PT
3114 __u32 o_uid_h;
3115 __u32 o_gid_h;
3116
3117 __u64 o_data_version; /* getattr: sum of iversion for
3118 * each stripe.
3119 * brw: grant space consumed on
a1e616b0
OD
3120 * the client for the write
3121 */
d7e09d03
PT
3122 __u64 o_padding_4;
3123 __u64 o_padding_5;
3124 __u64 o_padding_6;
3125};
3126
3127#define o_dirty o_blocks
3128#define o_undirty o_mode
3129#define o_dropped o_misc
3130#define o_cksum o_nlink
3131#define o_grant_used o_data_version
3132
d7e09d03
PT
3133/* request structure for OST's */
3134struct ost_body {
3135 struct obdo oa;
3136};
3137
3138/* Key for FIEMAP to be used in get_info calls */
3139struct ll_fiemap_info_key {
cbd4d4a8
BJ
3140 char lfik_name[8];
3141 struct obdo lfik_oa;
3142 struct fiemap lfik_fiemap;
d7e09d03
PT
3143};
3144
d7e09d03
PT
3145/* Functions for dumping PTLRPC fields */
3146void dump_rniobuf(struct niobuf_remote *rnb);
3147void dump_ioo(struct obd_ioobj *nb);
d7e09d03
PT
3148void dump_ost_body(struct ost_body *ob);
3149void dump_rcs(__u32 *rc);
3150
d7e09d03 3151/* security opcodes */
9e04cbfc 3152enum sec_cmd {
d7e09d03
PT
3153 SEC_CTX_INIT = 801,
3154 SEC_CTX_INIT_CONT = 802,
3155 SEC_CTX_FINI = 803,
3156 SEC_LAST_OPC,
3157 SEC_FIRST_OPC = SEC_CTX_INIT
9e04cbfc 3158};
d7e09d03
PT
3159
3160/*
3161 * capa related definitions
3162 */
3163#define CAPA_HMAC_MAX_LEN 64
3164#define CAPA_HMAC_KEY_MAX_LEN 56
3165
 3166/* NB take care when changing the sequence of elements in this struct,
a1e616b0
OD
3167 * because the offset info is used in find_capa()
3168 */
d7e09d03
PT
struct lustre_capa {
	struct lu_fid lc_fid;	/** fid */
	__u64 lc_opc;		/** operations allowed */
	__u64 lc_uid;		/** file owner */
	__u64 lc_gid;		/** file group */
	__u32 lc_flags;		/** HMAC algorithm & flags */
	__u32 lc_keyid;		/** key# used for the capability */
	__u32 lc_timeout;	/** capa timeout value (sec) */
/* FIXME: y2038 time_t overflow: */
	__u32 lc_expiry;	/** expiry time (sec) */
	__u8 lc_hmac[CAPA_HMAC_MAX_LEN];	/** HMAC */
} __packed;

/** lustre_capa::lc_opc */
enum {
	CAPA_OPC_BODY_WRITE = 1 << 0,	/**< write object data */
	CAPA_OPC_BODY_READ = 1 << 1,	/**< read object data */
	CAPA_OPC_INDEX_LOOKUP = 1 << 2,	/**< lookup object fid */
	CAPA_OPC_INDEX_INSERT = 1 << 3,	/**< insert object fid */
	CAPA_OPC_INDEX_DELETE = 1 << 4,	/**< delete object fid */
	CAPA_OPC_OSS_WRITE = 1 << 5,	/**< write oss object data */
	CAPA_OPC_OSS_READ = 1 << 6,	/**< read oss object data */
	CAPA_OPC_OSS_TRUNC = 1 << 7,	/**< truncate oss object */
	CAPA_OPC_OSS_DESTROY = 1 << 8,	/**< destroy oss object */
	CAPA_OPC_META_WRITE = 1 << 9,	/**< write object meta data */
	CAPA_OPC_META_READ = 1 << 10,	/**< read object meta data */
};

#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
#define CAPA_OPC_MDS_ONLY \
	(CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
	 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
#define CAPA_OPC_OSS_ONLY \
	(CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
	 CAPA_OPC_OSS_DESTROY)
#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)

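/*
 * lc_opc and the CAPA_OPC_* values are plain bitmasks, so checking whether
 * a capability authorizes a compound operation is a mask comparison.
 * Illustrative helper only, not the in-tree API; e.g. capa_grants(capa,
 * CAPA_OPC_OSS_RW) requires both the OSS read and write bits to be set.
 */
static inline bool capa_grants(const struct lustre_capa *capa, __u64 opc)
{
	return (capa->lc_opc & opc) == opc;
}
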
struct lustre_capa_key {
	__u64 lk_seq;	/**< mds# */
	__u32 lk_keyid;	/**< key# */
	__u32 lk_padding;
	__u8 lk_key[CAPA_HMAC_KEY_MAX_LEN];	/**< key */
} __packed;

/** The link ea holds 1 \a link_ea_entry for each hardlink */
#define LINK_EA_MAGIC 0x11EAF1DFUL
struct link_ea_header {
	__u32 leh_magic;
	__u32 leh_reccount;
	__u64 leh_len;	/* total size */
	/* future use */
	__u32 padding1;
	__u32 padding2;
};

/** Hardlink data is name and parent fid.
 * Stored in this crazy struct for maximum packing and endian-neutrality
 */
struct link_ea_entry {
	/** __u16 stored big-endian, unaligned */
	unsigned char lee_reclen[2];
	unsigned char lee_parent_fid[sizeof(struct lu_fid)];
	char lee_name[0];
} __packed;

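/*
 * Because lee_reclen is two unaligned bytes stored big-endian, producers
 * and consumers assemble the 16-bit record length by hand.  Illustrative
 * sketch only; the helper names are hypothetical.
 */
static inline void lee_reclen_set(struct link_ea_entry *lee, __u16 reclen)
{
	lee->lee_reclen[0] = (reclen >> 8) & 0xff;
	lee->lee_reclen[1] = reclen & 0xff;
}

static inline __u16 lee_reclen_get(const struct link_ea_entry *lee)
{
	return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}
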
/** fid2path request/reply structure */
struct getinfo_fid2path {
	struct lu_fid gf_fid;
	__u64 gf_recno;
	__u32 gf_linkno;
	__u32 gf_pathlen;
	char gf_path[0];
} __packed;

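/*
 * gf_path is a zero-length trailing array, so callers size the
 * request/reply buffer from the path length.  Illustrative arithmetic
 * only; the helper name is hypothetical.
 */
static inline size_t getinfo_fid2path_size(__u32 pathlen)
{
	return sizeof(struct getinfo_fid2path) + pathlen;
}
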
/** path2parent request/reply structures */
struct getparent {
	struct lu_fid gp_fid;	/**< parent FID */
	__u32 gp_linkno;	/**< hardlink number */
	__u32 gp_name_size;	/**< size of the name field */
	char gp_name[0];	/**< zero-terminated link name */
} __packed;

enum {
	LAYOUT_INTENT_ACCESS = 0,
	LAYOUT_INTENT_READ = 1,
	LAYOUT_INTENT_WRITE = 2,
	LAYOUT_INTENT_GLIMPSE = 3,
	LAYOUT_INTENT_TRUNC = 4,
	LAYOUT_INTENT_RELEASE = 5,
	LAYOUT_INTENT_RESTORE = 6
};

/* enqueue layout lock with intent */
struct layout_intent {
	__u32 li_opc;	/* intent operation for enqueue, read, write, etc. */
	__u32 li_flags;
	__u64 li_start;
	__u64 li_end;
};

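/*
 * li_opc takes one of the LAYOUT_INTENT_* values above and li_start/li_end
 * bound the byte range the intent covers.  Minimal illustrative
 * initializer; the helper is hypothetical, not part of the protocol.
 */
static inline void layout_intent_init_write(struct layout_intent *li,
					    __u64 start, __u64 end)
{
	li->li_opc = LAYOUT_INTENT_WRITE;
	li->li_flags = 0;
	li->li_start = start;
	li->li_end = end;
}
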
/**
 * On-the-wire version of the hsm_progress structure.
 *
 * Contains the userspace hsm_progress and some internal fields.
 */
struct hsm_progress_kernel {
	/* Fields taken from struct hsm_progress */
	struct lu_fid hpk_fid;
	__u64 hpk_cookie;
	struct hsm_extent hpk_extent;
	__u16 hpk_flags;
	__u16 hpk_errval;	/* positive val */
	__u32 hpk_padding1;
	/* Additional fields */
	__u64 hpk_data_version;
	__u64 hpk_padding2;
} __packed;

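/*
 * The wire structure embeds the fields of the userspace struct hsm_progress
 * (declared in lustre_user.h) and appends internal ones.  Illustrative
 * sketch of copying one into the other; the helper and the hp_* field names
 * are assumptions based on the userspace header, not asserted here.
 */
static void hsm_progress_pack(struct hsm_progress_kernel *hpk,
			      const struct hsm_progress *hp,
			      __u64 data_version)
{
	hpk->hpk_fid = hp->hp_fid;
	hpk->hpk_cookie = hp->hp_cookie;
	hpk->hpk_extent = hp->hp_extent;
	hpk->hpk_flags = hp->hp_flags;
	hpk->hpk_errval = hp->hp_errval;
	hpk->hpk_padding1 = 0;
	hpk->hpk_data_version = data_version;
	hpk->hpk_padding2 = 0;
}
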
/** layout swap request structure
 * fid1 and fid2 are in mdt_body
 */
struct mdc_swap_layouts {
	__u64 msl_flags;
} __packed;

struct close_data {
	struct lustre_handle cd_handle;
	struct lu_fid cd_fid;
	__u64 cd_data_version;
	__u64 cd_reserved[8];
};

#endif
/** @} lustreidl */