/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre/lustre_idl.h
 *
 * Lustre wire protocol definitions.
 */

/** \defgroup lustreidl lustreidl
 *
 * Lustre wire protocol definitions.
 *
 * ALL structs passing over the wire should be declared here. Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 *
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file. Similarly, all flags and magic
 * values in those structs should also be declared here. This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 *
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures, and the
 * prototypes of the swabber functions for each struct. Nothing that
 * depends on external functions or definitions should be in here.
 *
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary. Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources. This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 *
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions. Some structs may have padding fields that
 * can be used. Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 *
 * We assume all nodes are either little-endian or big-endian, and we
 * always send messages in the sender's native format. The receiver
 * detects the message format by checking the 'magic' field of the message
 * (see lustre_msg_swabbed() below).
 *
 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
 * implemented either here, inline (trivial implementations) or in
 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
 * endian, in-place in the message buffer.
 *
 * A swabber takes a single pointer argument. The caller must already have
 * verified that the length of the message buffer >= sizeof (type).
 *
 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
 * may be defined that swabs just the variable part, after the caller has
 * verified that the message buffer is large enough.
 *
 * @{
 */

#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/types.h"

/* Defn's shared with user-space. */
#include "lustre_user.h"
#include "lustre_errno.h"

/*
 * GENERAL STUFF
 */
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL is for incoming replies on the FOO
 * FOO_BULK_PORTAL is for incoming bulk on the FOO
 */

/* Lustre service names follow the format:
 * service name + MDT + seq name
 */
#define LUSTRE_MDT_MAXNAMELEN		80

#define CONNMGR_REQUEST_PORTAL		1
#define CONNMGR_REPLY_PORTAL		2
/*#define OSC_REQUEST_PORTAL		3 */
#define OSC_REPLY_PORTAL		4
/*#define OSC_BULK_PORTAL		5 */
#define OST_IO_PORTAL			6
#define OST_CREATE_PORTAL		7
#define OST_BULK_PORTAL			8
/*#define MDC_REQUEST_PORTAL		9 */
#define MDC_REPLY_PORTAL		10
/*#define MDC_BULK_PORTAL		11 */
#define MDS_REQUEST_PORTAL		12
/*#define MDS_REPLY_PORTAL		13 */
#define MDS_BULK_PORTAL			14
#define LDLM_CB_REQUEST_PORTAL		15
#define LDLM_CB_REPLY_PORTAL		16
#define LDLM_CANCEL_REQUEST_PORTAL	17
#define LDLM_CANCEL_REPLY_PORTAL	18
/*#define PTLBD_REQUEST_PORTAL		19 */
/*#define PTLBD_REPLY_PORTAL		20 */
/*#define PTLBD_BULK_PORTAL		21 */
#define MDS_SETATTR_PORTAL		22
#define MDS_READPAGE_PORTAL		23
#define OUT_PORTAL			24

#define MGC_REPLY_PORTAL		25
#define MGS_REQUEST_PORTAL		26
#define MGS_REPLY_PORTAL		27
#define OST_REQUEST_PORTAL		28
#define FLD_REQUEST_PORTAL		29
#define SEQ_METADATA_PORTAL		30
#define SEQ_DATA_PORTAL			31
#define SEQ_CONTROLLER_PORTAL		32
#define MGS_BULK_PORTAL			33

/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
 * n8851@cray.com
 */

/* packet types */
#define PTL_RPC_MSG_REQUEST	4711
#define PTL_RPC_MSG_ERR		4712
#define PTL_RPC_MSG_REPLY	4713

/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V2		0x0BD00BD3
#define LUSTRE_MSG_MAGIC_V2_SWABBED	0xD30BD00B

#define LUSTRE_MSG_MAGIC		LUSTRE_MSG_MAGIC_V2

#define PTLRPC_MSG_VERSION	0x00000003
#define LUSTRE_VERSION_MASK	0xffff0000
#define LUSTRE_OBD_VERSION	0x00010000
#define LUSTRE_MDS_VERSION	0x00020000
#define LUSTRE_OST_VERSION	0x00030000
#define LUSTRE_DLM_VERSION	0x00040000
#define LUSTRE_LOG_VERSION	0x00050000
#define LUSTRE_MGS_VERSION	0x00060000

/**
 * Describes a range of sequence numbers; lsr_start is included in the
 * range but lsr_end is not. The same structure is used in the fld module,
 * where the lsr_index field holds the mdt id of the home mdt.
 */
struct lu_seq_range {
	__u64 lsr_start;
	__u64 lsr_end;
	__u32 lsr_index;
	__u32 lsr_flags;
};

#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3

#define LU_SEQ_RANGE_MASK	0x3

static inline unsigned fld_range_type(const struct lu_seq_range *range)
{
	return range->lsr_flags & LU_SEQ_RANGE_MASK;
}

static inline int fld_range_is_ost(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_OST;
}

static inline int fld_range_is_mdt(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
}

/**
 * The ANY range type is only used when the fld client sends an fld query
 * request but does not know whether the seq is on an MDT or an OST; the
 * request is sent with the ANY type, meaning either seq type returned by
 * the lookup is acceptable.
 */
static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
{
	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
}

static inline void fld_range_set_type(struct lu_seq_range *range,
				      unsigned flags)
{
	range->lsr_flags |= flags;
}

static inline void fld_range_set_mdt(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
}

static inline void fld_range_set_ost(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_OST);
}

static inline void fld_range_set_any(struct lu_seq_range *range)
{
	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
}

/**
 * returns width of given range \a range
 */
static inline __u64 range_space(const struct lu_seq_range *range)
{
	return range->lsr_end - range->lsr_start;
}

/**
 * initialize range to zero
 */
static inline void range_init(struct lu_seq_range *range)
{
	memset(range, 0, sizeof(*range));
}

/**
 * check if given seq id \a s is within given range \a range
 */
static inline int range_within(const struct lu_seq_range *range,
			       __u64 s)
{
	return s >= range->lsr_start && s < range->lsr_end;
}

static inline int range_is_sane(const struct lu_seq_range *range)
{
	return range->lsr_end >= range->lsr_start;
}

static inline int range_is_zero(const struct lu_seq_range *range)
{
	return range->lsr_start == 0 && range->lsr_end == 0;
}

static inline int range_is_exhausted(const struct lu_seq_range *range)
{
	return range_space(range) == 0;
}

/* return 0 if the two ranges have the same location */
static inline int range_compare_loc(const struct lu_seq_range *r1,
				    const struct lu_seq_range *r2)
{
	return r1->lsr_index != r2->lsr_index ||
	       r1->lsr_flags != r2->lsr_flags;
}

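/*
 * Usage sketch for the lu_seq_range helpers above. Purely illustrative;
 * the sequence values are made-up examples, not reserved constants.
 */
static inline int range_helpers_example(void)
{
	struct lu_seq_range range;

	range_init(&range);			/* all fields zeroed */
	range.lsr_start = 0x200000400ULL;	/* half-open: [start, end) */
	range.lsr_end = 0x200000500ULL;
	fld_range_set_ost(&range);

	/* width is end - start; the start is inside, the end is not */
	return range_is_sane(&range) && fld_range_is_ost(&range) &&
	       range_space(&range) == 0x100 &&
	       range_within(&range, 0x200000400ULL) &&
	       !range_within(&range, 0x200000500ULL);
}
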
#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"

#define PRANGE(range)			\
	(range)->lsr_start,		\
	(range)->lsr_end,		\
	(range)->lsr_index,		\
	fld_range_is_mdt(range) ? "mdt" : "ost"

/** \defgroup lu_fid lu_fid
 * @{
 */

/**
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
 * xattr.
 */
enum lma_compat {
	LMAC_HSM	= 0x00000001,
	LMAC_SOM	= 0x00000002,
	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
	LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
				       * under /O/<seq>/d<x>.
				       */
};

/**
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
 */
enum lma_incompat {
	LMAI_RELEASED		= 0x00000001, /* file is released */
	LMAI_AGENT		= 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
					       * is on the remote MDT
					       */
};

#define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)

/**
 * fid constants
 */
enum {
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID = 1UL
};

/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
	return fid->f_seq;
}

/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
	return fid->f_oid;
}

/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
	return fid->f_ver;
}

static inline void fid_zero(struct lu_fid *fid)
{
	memset(fid, 0, sizeof(*fid));
}

static inline __u64 fid_ver_oid(const struct lu_fid *fid)
{
	return (__u64)fid_ver(fid) << 32 | fid_oid(fid);
}

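/*
 * Sketch of the accessors above (illustrative; the FID below is a made-up
 * example): fid_ver_oid() packs the 32-bit version and OID into one __u64.
 */
static inline int fid_accessors_example(void)
{
	struct lu_fid fid = { .f_seq = 0x200000400ULL, .f_oid = 5, .f_ver = 0 };

	return fid_seq(&fid) == 0x200000400ULL &&
	       fid_oid(&fid) == 5 &&
	       fid_ver_oid(&fid) == 5;	/* ver == 0, so just the oid */
}
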
/* The copytool uses a 32-bit bitmask field to encode archive IDs when
 * registering with the MDT through KUC.
 * archive num = 0 => all
 * archive num from 1 to 32
 */
#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)

/**
 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
 * used for other purposes and not risk collisions with existing inodes.
 *
 * Different FID Format
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
 */
enum fid_seq {
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_ECHO		= 2,
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_RSVD		= 11,
	FID_SEQ_IGIF		= 12,
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by the local_object_storage library
	 */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because the current FLD only caches the fid sequence (not the
	 * oid) on the client side, if a FID needs to be exposed to
	 * clients, all fids under one sequence must be located on one MDT.
	 */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL, /* Located on MDT0 */
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
};

#define OBIF_OID_MAX_BITS	32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK		((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK		((1ULL << IDIF_OID_MAX_BITS) - 1)

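/*
 * Worked example for the masks above (illustrative value): an IDIF OID
 * uses 48 bits, so masking a wider value is the same as reducing it
 * modulo IDIF_MAX_OID, and the largest valid OID is IDIF_MAX_OID - 1.
 */
static inline int oid_mask_example(void)
{
	__u64 oid = 0x123456789abcdefULL;	/* example 57-bit value */

	return (oid & IDIF_OID_MASK) == (oid % IDIF_MAX_OID) &&
	       OBIF_MAX_OID == 0x100000000ULL;
}
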
/** OID for FID_SEQ_SPECIAL */
enum special_oid {
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL = 1UL,
};

/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE	= 1UL,
	FID_OID_DOT_LUSTRE_OBF	= 2UL,
};

static inline int fid_seq_is_mdt0(__u64 seq)
{
	return seq == FID_SEQ_OST_MDT0;
}

static inline int fid_seq_is_mdt(const __u64 seq)
{
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
}

static inline int fid_seq_is_echo(__u64 seq)
{
	return seq == FID_SEQ_ECHO;
}

static inline int fid_is_echo(const struct lu_fid *fid)
{
	return fid_seq_is_echo(fid_seq(fid));
}

static inline int fid_seq_is_llog(__u64 seq)
{
	return seq == FID_SEQ_LLOG;
}

static inline int fid_is_llog(const struct lu_fid *fid)
{
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
}

static inline int fid_seq_is_rsvd(const __u64 seq)
{
	return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD;
}

static inline int fid_seq_is_special(const __u64 seq)
{
	return seq == FID_SEQ_SPECIAL;
}

static inline int fid_seq_is_local_file(const __u64 seq)
{
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
}

static inline int fid_seq_is_root(const __u64 seq)
{
	return seq == FID_SEQ_ROOT;
}

static inline int fid_seq_is_dot(const __u64 seq)
{
	return seq == FID_SEQ_DOT_LUSTRE;
}

static inline int fid_seq_is_default(const __u64 seq)
{
	return seq == FID_SEQ_LOV_DEFAULT;
}

static inline int fid_is_mdt0(const struct lu_fid *fid)
{
	return fid_seq_is_mdt0(fid_seq(fid));
}

static inline void lu_root_fid(struct lu_fid *fid)
{
	fid->f_seq = FID_SEQ_ROOT;
	fid->f_oid = 1;
	fid->f_ver = 0;
}

/**
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
 */
static inline int fid_seq_is_igif(const __u64 seq)
{
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
}

static inline int fid_is_igif(const struct lu_fid *fid)
{
	return fid_seq_is_igif(fid_seq(fid));
}

/**
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
 */
static inline int fid_seq_is_idif(const __u64 seq)
{
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
}

static inline int fid_is_idif(const struct lu_fid *fid)
{
	return fid_seq_is_idif(fid_seq(fid));
}

static inline int fid_is_local_file(const struct lu_fid *fid)
{
	return fid_seq_is_local_file(fid_seq(fid));
}

static inline int fid_seq_is_norm(const __u64 seq)
{
	return seq >= FID_SEQ_NORMAL;
}

static inline int fid_is_norm(const struct lu_fid *fid)
{
	return fid_seq_is_norm(fid_seq(fid));
}

/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
{
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
}

/* convert a packed IDIF FID into an OST objid */
static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
{
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
}

/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
	return (fid_seq(fid) >> 16) & 0xffff;
}

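/*
 * Round-trip sketch for the IDIF helpers above (illustrative values):
 * pack an OST objid and OST index into an IDIF FID, then recover both.
 */
static inline int fid_idif_example(void)
{
	__u64 objid = 0x0000000100000002ULL;	/* example 33-bit objid */
	__u32 ost_idx = 7;			/* example OST index */
	struct lu_fid fid;

	fid.f_seq = fid_idif_seq(objid, ost_idx); /* IDIF | idx<<16 | id>>32 */
	fid.f_oid = (__u32)objid;		  /* low 32 bits of the objid */
	fid.f_ver = 0;

	return fid_idif_ost_idx(&fid) == ost_idx &&
	       fid_idif_id(fid_seq(&fid), fid_oid(&fid), fid_ver(&fid)) == objid;
}
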
/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline __u64 ostid_seq(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;

	if (fid_seq_is_default(ostid->oi.oi_seq))
		return FID_SEQ_LOV_DEFAULT;

	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;

	return fid_seq(&ostid->oi_fid);
}

/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
	if (fid_seq_is_mdt0(ostid_seq(ostid)))
		return ostid->oi.oi_id & IDIF_OID_MASK;

	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);

	return fid_oid(&ostid->oi_fid);
}

static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
{
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi.oi_seq = seq;
	} else {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid + f_ver is zero, we need to init it
		 * to be 1; otherwise ostid_seq will treat this as an
		 * old ostid (oi_seq == 0)
		 */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
	}
}

static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}

static inline void ostid_set_seq_echo(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_ECHO);
}

static inline void ostid_set_seq_llog(struct ost_id *oi)
{
	ostid_set_seq(oi, FID_SEQ_LLOG);
}

/**
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
 */
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
			return;
		}
		oi->oi.oi_id = oid;
	} else {
		if (oid >= OBIF_MAX_OID) {
			CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
			return;
		}
		oi->oi_fid.f_oid = oid;
	}
}

static inline void ostid_inc_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
			CERROR("Bad inc "DOSTID"\n", POSTID(oi));
			return;
		}
		oi->oi.oi_id++;
	} else {
		oi->oi_fid.f_oid++;
	}
}

static inline void ostid_dec_id(struct ost_id *oi)
{
	if (fid_seq_is_mdt0(ostid_seq(oi)))
		oi->oi.oi_id--;
	else
		oi->oi_fid.f_oid--;
}

/**
 * Unpack an OST object id/seq (group) into a FID. This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged. Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss. For reference see:
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
 */
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
			       __u32 ost_idx)
{
	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
		       ost_idx);
		return -EBADF;
	}

	if (fid_seq_is_mdt0(ostid_seq(ostid))) {
		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace. It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years. This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof.
		 */
		if (ostid_id(ostid) >= IDIF_MAX_OID) {
			CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = ostid_id(ostid);
		/* in theory, not currently used */
		fid->f_ver = ostid_id(ostid) >> 48;
	} else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
		/* This is either an IDIF object, which identifies objects
		 * across all OSTs, or a regular FID. The IDIF namespace
		 * maps legacy OST objects into the FID namespace. In both
		 * cases, we just pass the FID through, no conversion needed.
		 */
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad ver, " DOSTID " ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
			return -EBADF;
		}
		*fid = ostid->oi_fid;
	}

	return 0;
}

/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
						fid_ver(fid)));
	} else {
		ostid->oi_fid = *fid;
	}

	return 0;
}

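/*
 * Sketch of a legacy "group 0" ost_id being mapped through ostid_to_fid()
 * and back through fid_to_ostid() (illustrative values; error handling
 * follows the functions above):
 */
static inline int ostid_fid_roundtrip_example(struct lu_fid *fid)
{
	struct ost_id oi;
	int rc;

	ostid_set_seq_mdt0(&oi);	/* legacy id/seq object, seq 0 */
	ostid_set_id(&oi, 42);

	rc = ostid_to_fid(fid, &oi, 1);	/* maps into the IDIF namespace */
	if (rc != 0)
		return rc;

	return fid_to_ostid(fid, &oi);	/* recovers oi_id == 42, seq 0 */
}
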
/* Check whether the fid is for LAST_ID */
static inline int fid_is_last_id(const struct lu_fid *fid)
{
	return fid_oid(fid) == 0;
}

/**
 * Get inode number from an igif.
 * \param fid an igif to get inode number from.
 * \return inode number for the igif.
 */
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
{
	return fid_seq(fid);
}

void lustre_swab_ost_id(struct ost_id *oid);

/**
 * Get inode generation from an igif.
 * \param fid an igif to get inode generation from.
 * \return inode generation for the igif.
 */
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
{
	return fid_oid(fid);
}

/**
 * Build igif from the inode number/generation.
 */
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
{
	fid->f_seq = ino;
	fid->f_oid = gen;
	fid->f_ver = 0;
}

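/*
 * Sketch of the IGIF mapping above (illustrative ino/gen pair): an inode
 * number and generation round-trip through lu_igif_build(), lu_igif_ino()
 * and lu_igif_gen().
 */
static inline int lu_igif_example(void)
{
	struct lu_fid fid;

	lu_igif_build(&fid, 123456, 7);	/* example ino/gen pair */

	return fid_is_igif(&fid) &&
	       lu_igif_ino(&fid) == 123456 &&
	       lu_igif_gen(&fid) == 7;
}
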
/*
 * Fids are transmitted across the network (in the sender's byte order)
 * and stored on disk in big-endian order.
 */
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
}

static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
}

static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
}

static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
}

static inline int fid_is_sane(const struct lu_fid *fid)
{
	return fid &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
		fid_is_igif(fid) || fid_is_idif(fid) ||
		fid_seq_is_rsvd(fid_seq(fid)));
}

static inline int fid_is_zero(const struct lu_fid *fid)
{
	return fid_seq(fid) == 0 && fid_oid(fid) == 0;
}

void lustre_swab_lu_fid(struct lu_fid *fid);
void lustre_swab_lu_seq_range(struct lu_seq_range *range);

static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
	return memcmp(f0, f1, sizeof(*f0)) == 0;
}

#define __diff_normalize(val0, val1)				\
({								\
	typeof(val0) __val0 = (val0);				\
	typeof(val1) __val1 = (val1);				\
								\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1);	\
})

static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
{
	return
		__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
		__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
		__diff_normalize(fid_ver(f0), fid_ver(f1));
}

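/*
 * Sketch of the total order defined by lu_fid_cmp() above (illustrative
 * FIDs): the sequence is compared first, then the oid, then the version.
 */
static inline int lu_fid_cmp_example(void)
{
	struct lu_fid a = { .f_seq = 0x200000400ULL, .f_oid = 1, .f_ver = 0 };
	struct lu_fid b = { .f_seq = 0x200000400ULL, .f_oid = 2, .f_ver = 0 };

	return lu_fid_cmp(&a, &b) == -1 &&	/* same seq, smaller oid */
	       lu_fid_cmp(&b, &a) == 1 &&
	       lu_fid_cmp(&a, &a) == 0 &&
	       lu_fid_eq(&a, &a);
}
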
static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
	} else {
		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
{
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
	} else {
		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
	}
}

/** @} lu_fid */

/** \defgroup lu_dir lu_dir
 * @{
 */

/**
 * Enumeration of possible directory entry attributes.
 *
 * Attributes follow directory entry header in the order they appear in this
 * enumeration.
 */
enum lu_dirent_attrs {
	LUDA_FID	= 0x0001,
	LUDA_TYPE	= 0x0002,
	LUDA_64BITHASH	= 0x0004,
};

/**
 * Layout of readdir pages, as transmitted on wire.
 */
struct lu_dirent {
	/** valid if LUDA_FID is set. */
	struct lu_fid lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	__u64 lde_hash;
	/** total record length, including all attributes. */
	__u16 lde_reclen;
	/** name length */
	__u16 lde_namelen;
	/** optional variable size attributes following this entry.
	 * taken from enum lu_dirent_attrs.
	 */
	__u32 lde_attrs;
	/** name is followed by the attributes indicated in ->ldp_attrs, in
	 * their natural order. After the last attribute, padding bytes are
	 * added to make ->lde_reclen a multiple of 8.
	 */
	char lde_name[0];
};

/*
 * Definitions of optional directory entry attributes formats.
 *
 * Individual attributes do not have their length encoded in a generic way.
 * It is assumed that the consumer of an attribute knows its format. This
 * means that it is impossible to skip over an unknown attribute, except by
 * skipping over all remaining attributes (by using ->lde_reclen), which is
 * not too constraining, because new server versions will append new
 * attributes at the end of an entry.
 */

/**
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will be almost always requested by the client and supplied by the server.
 *
 * Aligned to 8 bytes.
 */
/* To have compatibility with 1.8, lets have fid in lu_dirent struct. */

/**
 * File type.
 *
 * Aligned to 2 bytes.
 */
struct luda_type {
	__u16 lt_type;
};

#ifndef IFSHIFT
#define IFSHIFT 12
#endif

#ifndef IFTODT
#define IFTODT(type)	(((type) & S_IFMT) >> IFSHIFT)
#endif
#ifndef DTTOIF
#define DTTOIF(dirtype)	((dirtype) << IFSHIFT)
#endif

struct lu_dirpage {
	__u64 ldp_hash_start;
	__u64 ldp_hash_end;
	__u32 ldp_flags;
	__u32 ldp_pad0;
	struct lu_dirent ldp_entries[0];
};

enum lu_dirpage_flags {
	/**
	 * dirpage contains no entry.
	 */
	LDF_EMPTY = 1 << 0,
	/**
	 * last entry's lde_hash equals ldp_hash_end.
	 */
	LDF_COLLIDE = 1 << 1
};

static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
		return NULL;
	else
		return dp->ldp_entries;
}

static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
{
	struct lu_dirent *next;

	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
	else
		next = NULL;

	return next;
}

static inline int lu_dirent_calc_size(int namelen, __u16 attr)
{
	int size;

	if (attr & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
	} else {
		size = sizeof(struct lu_dirent) + namelen;
	}

	return (size + 7) & ~7;
}

static inline int lu_dirent_size(struct lu_dirent *ent)
{
	if (le16_to_cpu(ent->lde_reclen) == 0) {
		return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
					   le32_to_cpu(ent->lde_attrs));
	}
	return le16_to_cpu(ent->lde_reclen);
}

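/*
 * Sketch of walking one readdir page with the helpers above (illustrative;
 * assumes "dp" points at a little-endian struct lu_dirpage received from
 * the server, with a zero lde_reclen terminating the entry list):
 */
static inline int lu_dirent_count_example(struct lu_dirpage *dp)
{
	struct lu_dirent *ent;
	int count = 0;

	/* lu_dirent_start() is NULL for an LDF_EMPTY page */
	for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent))
		count++;

	return count;
}
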
#define MDS_DIR_END_OFF 0xfffffffffffffffeULL

/**
 * MDS_READPAGE page size
 *
 * This is the directory page size packed in MDS_READPAGE RPC.
 * It differs from PAGE_SIZE because the client needs to access the
 * struct lu_dirpage header packed at the beginning of the "page"; without
 * a fixed lu_dirpage size there would be no way to locate the lu_dirpage
 * headers if the client and server page sizes differ.
 */
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))

#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))

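/*
 * Worked example for the constants above: with a 64KiB client page
 * (PAGE_SHIFT == 16) and the fixed 4KiB LU_PAGE, LU_PAGE_COUNT is
 * 1 << (16 - 12) == 16 lu_dirpages per VM page; with 4KiB pages the two
 * sizes coincide and LU_PAGE_COUNT is 1.
 */
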
/** @} lu_dir */

struct lustre_handle {
	__u64 cookie;
};

#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL

static inline int lustre_handle_is_used(struct lustre_handle *lh)
{
	return lh->cookie != 0ull;
}

static inline int lustre_handle_equal(const struct lustre_handle *lh1,
				      const struct lustre_handle *lh2)
{
	return lh1->cookie == lh2->cookie;
}

static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      struct lustre_handle *src)
{
	tgt->cookie = src->cookie;
}

/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT	0x1
#define MSGHDR_CKSUM_INCOMPAT18	0x2

#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_bufcount;
	__u32 lm_secflvr;
	__u32 lm_magic;
	__u32 lm_repsize;
	__u32 lm_cksum;
	__u32 lm_flags;
	__u32 lm_padding_2;
	__u32 lm_padding_3;
	__u32 lm_buflens[0];
};

/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS	4
#define JOBSTATS_JOBID_SIZE	32 /* 32 bytes string */
struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout; /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time */
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
	char  pb_jobid[JOBSTATS_JOBID_SIZE];
};

#define ptlrpc_body ptlrpc_body_v3

struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u32 pb_type;
	__u32 pb_version;
	__u32 pb_opc;
	__u32 pb_status;
	__u64 pb_last_xid;
	__u64 pb_last_seen;
	__u64 pb_last_committed;
	__u64 pb_transno;
	__u32 pb_flags;
	__u32 pb_op_flags;
	__u32 pb_conn_cnt;
	__u32 pb_timeout; /* for req, the deadline, for rep, the service est */
	__u32 pb_service_time; /* for rep, actual service time, also used for
				* net_latency of req
				*/
	__u32 pb_limit;
	__u64 pb_slv;
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
};

void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);

/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	0

/* normal request/reply message record offset */
#define REQ_REC_OFF		1
#define REPLY_REC_OFF		1

/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		1 /* lockreq offset */
#define DLM_REQ_REC_OFF		2 /* normal dlm request record offset */

/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	3 /* intent lock record offset */

/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	2 /* reply record offset */

/** only use in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	31

/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK	0xffff0000
#define MSG_OP_FLAG_SHIFT	16

/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK	0x0000ffff
#define MSG_LAST_REPLAY		0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code there also isn't a need for using
 * this bit. Defer usage until at least 1.10 to avoid potential conflict.
 */
#define MSG_DELAY_REPLAY	0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE	0x0040
#define MSG_LOCK_REPLAY_DONE	0x0080

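/*
 * Sketch of how the 16/16 split above is used (illustrative): operation-
 * specific flags live in the top half of a 32-bit flags word, generic
 * request flags in the bottom half.
 */
static inline __u32 msg_op_flags_example(__u32 flags)
{
	/* e.g. flags == (MSG_RESENT | (3 << MSG_OP_FLAG_SHIFT)) yields 3 */
	return (flags & MSG_OP_FLAG_MASK) >> MSG_OP_FLAG_SHIFT;
}
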
1179 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
1180 */
1181
1182 #define MSG_CONNECT_RECOVERING 0x00000001
1183 #define MSG_CONNECT_RECONNECT 0x00000002
1184 #define MSG_CONNECT_REPLAYABLE 0x00000004
1185 /*#define MSG_CONNECT_PEER 0x8 */
1186 #define MSG_CONNECT_LIBCLIENT 0x00000010
1187 #define MSG_CONNECT_INITIAL 0x00000020
1188 #define MSG_CONNECT_ASYNC 0x00000040
1189 #define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
1190 #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
1191
/* Connect flags */
#define OBD_CONNECT_RDONLY		  0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		  0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS			  0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		  0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK		 0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION		 0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL		 0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL			 0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS	       0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN	       0x2000ULL /*files can be concatenated.
						  *We do not support JOIN FILE
						  *anymore; reserve this flag
						  *just to prevent the bit
						  *from being reused.
						  */
#define OBD_CONNECT_ATTRFID	       0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH	       0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT	      0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /*Remote client by force */
#define OBD_CONNECT_BRW_SIZE	      0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64	      0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA	     0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA	     0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET	     0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM		     0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT		    0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE	    0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS	    0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL	    0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS	   0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM	   0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID		   0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR		   0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3	  0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK  0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN	  0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE	  0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20	 0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK	 0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH	 0x4000000000ULL /* client supports 64-bit
						  * directory hash
						  */
#define OBD_CONNECT_MAXBYTES	 0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV	0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS	0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK	0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS	0x80000000000ULL /* client handles -EINPROGRESS
						  * RPC error properly
						  */
#define OBD_CONNECT_GRANT_PARAM	0x100000000000ULL /* extra grant params used
						   * for finer space
						   * reservation
						   */
#define OBD_CONNECT_FLOCK_OWNER	0x200000000000ULL /* for the fixed 1.8
						   * policy and 2.x server
						   */
#define OBD_CONNECT_LVB_TYPE	0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT	0x1000000000000ULL /* lightweight connection */
#define OBD_CONNECT_SHORTIO	0x2000000000000ULL /* short io */
#define OBD_CONNECT_PINGLESS	0x4000000000000ULL /* pings not required */
#define OBD_CONNECT_FLOCK_DEAD	0x8000000000000ULL /* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE	0x10000000000000ULL /* create stripe disposition */

/* XXX README XXX:
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch. Please clear any such
 * changes with senior engineers before starting to use a new flag. Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use.
 */

/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection. It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644.
 */
#define OBD_CONNECT_MNE_SWAB	OBD_CONNECT_MDS_MDS

#define OCD_HAS_FLAG(ocd, flg) \
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))

/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
				 OBD_CONNECT_FULL20)

#define OBD_OCD_VERSION(major, minor, patch, fix) (((major) << 24) + \
						   ((minor) << 16) + \
						   ((patch) << 8) + (fix))
#define OBD_OCD_VERSION_MAJOR(version)	((int)((version) >> 24) & 255)
#define OBD_OCD_VERSION_MINOR(version)	((int)((version) >> 16) & 255)
#define OBD_OCD_VERSION_PATCH(version)	((int)((version) >> 8) & 255)
#define OBD_OCD_VERSION_FIX(version)	((int)(version) & 255)

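/*
 * Worked example for the version macros above (version chosen purely for
 * illustration): 2.5.3.0 packs one byte per component, and the accessors
 * recover each component from the packed value.
 */
static inline int ocd_version_example(void)
{
	__u32 v = OBD_OCD_VERSION(2, 5, 3, 0);	/* == 0x02050300 */

	return v == 0x02050300 &&
	       OBD_OCD_VERSION_MAJOR(v) == 2 &&
	       OBD_OCD_VERSION_MINOR(v) == 5 &&
	       OBD_OCD_VERSION_PATCH(v) == 3 &&
	       OBD_OCD_VERSION_FIX(v) == 0;
}
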
/* This structure is used for both request and reply.
 *
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here.
 */
struct obd_connect_data_v1 {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
	__u64 ocd_ibits_known;	 /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;	 /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;	 /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* also fix lustre_swab_connect */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
};

struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;	 /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;	 /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;	 /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* instance # of this target */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set. Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in out-of-bounds memory access and a kernel oops.
	 */
	__u64 padding1;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding2;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding3;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;	 /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;	 /* added 2.1.0. also fix lustre_swab_connect */
};

/* XXX README XXX:
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch. Please clear any such changes
 * with senior engineers before starting to use a new field. Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along with
 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
 * reserve the flag for future use.
 */

void lustre_swab_connect(struct obd_connect_data *ocd);

/*
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
 */
enum cksum_type {
	OBD_CKSUM_CRC32	 = 0x00000001,
	OBD_CKSUM_ADLER	 = 0x00000002,
	OBD_CKSUM_CRC32C = 0x00000004,
};

/*
 * OST requests: OBDO & OBD request records
 */

/* opcodes */
enum ost_cmd {
	OST_REPLY	= 0,	/* reply ? */
	OST_GETATTR	= 1,
	OST_SETATTR	= 2,
	OST_READ	= 3,
	OST_WRITE	= 4,
	OST_CREATE	= 5,
	OST_DESTROY	= 6,
	OST_GET_INFO	= 7,
	OST_CONNECT	= 8,
	OST_DISCONNECT	= 9,
	OST_PUNCH	= 10,
	OST_OPEN	= 11,
	OST_CLOSE	= 12,
	OST_STATFS	= 13,
	OST_SYNC	= 16,
	OST_SET_INFO	= 17,
	OST_QUOTACHECK	= 18,
	OST_QUOTACTL	= 19,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
	OST_LAST_OPC
};
#define OST_FIRST_OPC OST_REPLY

enum obdo_flags {
	OBD_FL_INLINEDATA   = 0x00000001,
	OBD_FL_OBDMDEXISTS  = 0x00000002,
	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC	    = 0x00000008, /* set in o_flags do in OSC not OST */
	OBD_FL_IDONLY	    = 0x00000010, /* set in o_flags only adjust obj id*/
	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
	OBD_FL_SRVLOCK	    = 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
					   * XXX: obsoleted - reserved for old
					   * clients prior to 2.2
					   */
	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */

	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted.
	 */
	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
			      OBD_FL_CKSUM_CRC32C,

	/* mask for local-only flag, which won't be sent over network */
	OBD_FL_LOCAL_MASK   = 0xF0000000,
};

#define LOV_MAGIC_V1		0x0BD10BD0
#define LOV_MAGIC		LOV_MAGIC_V1
#define LOV_MAGIC_JOIN_V1	0x0BD20BD0
#define LOV_MAGIC_V3		0x0BD30BD0

/*
 * magic for fully defined striping
 * the idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
 * lov_mds_md_v[13]). at the moment the magics are used in the wire protocol,
 * so we can't just change them without lengthy preparation, but we still
 * need a mechanism to allow LOD to differentiate hint versus ready striping.
 * so, at the moment we do a trick: MDT knows what to expect from a request
 * depending on the case (replay uses ready striping, non-replay req uses
 * hints), so MDT replaces the magic with the appropriate one and now LOD can
 * easily understand what's inside -bzzz
 */
#define LOV_MAGIC_V1_DEF	0x0CD10BD0
#define LOV_MAGIC_V3_DEF	0x0CD30BD0

#define LOV_PATTERN_RAID0	0x001	/* stripes are used round-robin */
#define LOV_PATTERN_RAID1	0x002	/* stripes are mirrors of each other */
#define LOV_PATTERN_FIRST	0x100	/* first stripe is not in round-robin */
#define LOV_PATTERN_CMOBD	0x200

#define LOV_PATTERN_F_MASK	0xffff0000
#define LOV_PATTERN_F_RELEASED	0x80000000 /* HSM released file */

#define lov_pattern(pattern)		((pattern) & ~LOV_PATTERN_F_MASK)
#define lov_pattern_flags(pattern)	((pattern) & LOV_PATTERN_F_MASK)

#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 {	  /* per-stripe data structure (little-endian)*/
	struct ost_id l_ost_oi;	  /* OST object ID */
	__u32 l_ost_gen;	  /* generation of this l_ost_idx */
	__u32 l_ost_idx;	  /* OST index in LOV (lov_tgt_desc->tgts) */
};

#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 {		/* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	/* magic number = LOV_MAGIC_V1 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id lmm_oi;	/* LOV object ID */
	__u32 lmm_stripe_size;	/* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	/* num stripes in use for this object */
	__u16 lmm_layout_gen;	/* layout generation number */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

/**
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *	........
 *	__u64 lmm_object_id;
 *	__u64 lmm_object_seq;
 *	......
 * }
 * to identify the LOV(MDT) object, and lmm_object_seq will
 * be normal_fid, which makes it hard to combine these conversions
 * into ostid_to_fid(), so we do the lmm_oi/fid conversion separately.
 *
 * We can tell the lmm_oi by this way,
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *      lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from LMA. Besides, this multiple-case check
 * would make swabbing more complicated, so we keep using id/seq
 * for lmm_oi.
 */

static inline void fid_to_lmm_oi(const struct lu_fid *fid,
				 struct ost_id *oi)
{
	oi->oi.oi_id = fid_oid(fid);
	oi->oi.oi_seq = fid_seq(fid);
}

static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
{
	oi->oi.oi_seq = seq;
}

static inline __u64 lmm_oi_id(struct ost_id *oi)
{
	return oi->oi.oi_id;
}

static inline __u64 lmm_oi_seq(struct ost_id *oi)
{
	return oi->oi.oi_seq;
}

static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
}

static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
{
	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
}

/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */

#define MAX_MD_SIZE \
	(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE \
	(sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))

#define XATTR_NAME_ACL_ACCESS	"system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT	"system.posix_acl_default"
#define XATTR_USER_PREFIX	"user."
#define XATTR_TRUSTED_PREFIX	"trusted."
#define XATTR_SECURITY_PREFIX	"security."
#define XATTR_LUSTRE_PREFIX	"lustre."

#define XATTR_NAME_LOV		"trusted.lov"
#define XATTR_NAME_LMA		"trusted.lma"
#define XATTR_NAME_LMV		"trusted.lmv"
#define XATTR_NAME_LINK		"trusted.link"
#define XATTR_NAME_FID		"trusted.fid"
#define XATTR_NAME_VERSION	"trusted.version"
#define XATTR_NAME_SOM		"trusted.som"
#define XATTR_NAME_HSM		"trusted.hsm"
#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"

struct lov_mds_md_v3 {		/* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	/* magic number = LOV_MAGIC_V3 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id lmm_oi;	/* LOV object ID */
	__u32 lmm_stripe_size;	/* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	/* num stripes in use for this object */
	__u16 lmm_layout_gen;	/* layout generation number */
	char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};

static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
{
	if (lmm_magic == LOV_MAGIC_V3)
		return sizeof(struct lov_mds_md_v3) +
			stripes * sizeof(struct lov_ost_data_v1);
	else
		return sizeof(struct lov_mds_md_v1) +
			stripes * sizeof(struct lov_ost_data_v1);
}

static inline __u32
lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
{
	switch (lmm_magic) {
	case LOV_MAGIC_V1: {
		struct lov_mds_md_v1 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	case LOV_MAGIC_V3: {
		struct lov_mds_md_v3 lmm;

		if (buf_size < sizeof(lmm))
			return 0;

		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	}
	default:
		return 0;
	}
}

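/*
 * Worked example for the helpers above (illustrative; sizes follow the
 * on-wire layouts above): a 3-stripe V1 EA occupies
 * sizeof(lov_mds_md_v1) + 3 * sizeof(lov_ost_data_v1) bytes, and
 * lov_mds_md_max_stripe_count() inverts that for a given buffer size.
 */
static inline int lov_md_size_example(void)
{
	__u32 size = lov_mds_md_size(3, LOV_MAGIC_V1);

	return lov_mds_md_max_stripe_count(size, LOV_MAGIC_V1) == 3;
}
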
1623 #define OBD_MD_FLID (0x00000001ULL) /* object ID */
1624 #define OBD_MD_FLATIME (0x00000002ULL) /* access time */
1625 #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
1626 #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
1627 #define OBD_MD_FLSIZE (0x00000010ULL) /* size */
1628 #define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
1629 #define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
1630 #define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
1631 #define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
1632 #define OBD_MD_FLUID (0x00000200ULL) /* user ID */
1633 #define OBD_MD_FLGID (0x00000400ULL) /* group ID */
1634 #define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
1635 #define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
1636 #define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
1637 /*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
1638 #define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
1639 #define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
1640 #define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
1641 #define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
1642 #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
1643 #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
1644 /*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
1645 #define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
1646 #define OBD_MD_FLGROUP (0x01000000ULL) /* group */
1647 #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
1648 #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
1649 /* ->mds if epoch opens or closes
1650 */
1651 #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
1652 #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
1653 #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
1654 #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
1655 #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
1656
1657 #define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
1658 #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
1659 #define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
1660 #define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
1661
1662 #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
1663 #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
1664 #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
1665 #define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
1666 #define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
1667 #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
1668 #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
1669 #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
1670 #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
1671 #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
1672 * under lock; for xattr
1673 * requests means the
1674 * client holds the lock
1675 */
1676 #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
1677
1678 #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
1679 #define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
1680 #define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
1681 #define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
1682
1683 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1684 #define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
1685
1686 #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1687 OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
1688 OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
1689 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1690 OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
1691
1692 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
1693
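/*
 * Editorial sketch (not part of the original header): the OBD_MD_* bits are
 * OR-ed into a 64-bit validity mask (e.g. obdo::o_valid, defined below) to
 * declare which attribute fields of a message actually carry data.
 */
static inline int obd_md_has_size_example(__u64 valid)
{
	/* true if either the size or the blocks count is valid */
	return (valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) != 0;
}
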
1694 /* don't forget obdo_fid which is way down at the bottom so it can
1695 * come after the definition of llog_cookie
1696 */
1697
1698 enum hss_valid {
1699 HSS_SETMASK = 0x01,
1700 HSS_CLEARMASK = 0x02,
1701 HSS_ARCHIVE_ID = 0x04,
1702 };
1703
1704 struct hsm_state_set {
1705 __u32 hss_valid;
1706 __u32 hss_archive_id;
1707 __u64 hss_setmask;
1708 __u64 hss_clearmask;
1709 };
1710
1711 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1712 void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1713
1714 void lustre_swab_obd_statfs(struct obd_statfs *os);
1715
1716 /* ost_body.data values for OST_BRW */
1717
1718 #define OBD_BRW_READ 0x01
1719 #define OBD_BRW_WRITE 0x02
1720 #define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
1721 #define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
1722 * transfer and is not accounted in
1723 * the grant.
1724 */
1725 #define OBD_BRW_CHECK 0x10
1726 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
1727 #define OBD_BRW_GRANTED 0x40 /* the ost manages this */
1728 #define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
1729 #define OBD_BRW_NOQUOTA 0x100
1730 #define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
1731 #define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
1732 #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1733 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1734 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1735
1736 #define OBD_OBJECT_EOF 0xffffffffffffffffULL
1737
1738 #define OST_MIN_PRECREATE 32
1739 #define OST_MAX_PRECREATE 20000
1740
1741 struct obd_ioobj {
1742 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1743 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1744 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1745 * high 16 bits in 2.4 and later
1746 */
1747 __u32 ioo_bufcnt; /* number of niobufs for this object */
1748 };
1749
1750 #define IOOBJ_MAX_BRW_BITS 16
1751 #define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1752 #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1753 #define ioobj_max_brw_set(ioo, num) \
1754 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
1755
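/*
 * Editorial sketch (not part of the original header): the BRW count is
 * stored biased by one in the high 16 bits of ioo_max_brw, so a set/get
 * pair round-trips the value.
 */
static inline void ioobj_max_brw_example(struct obd_ioobj *ioo)
{
	ioobj_max_brw_set(ioo, 4);	/* stores (4 - 1) << 16 */
	/* ioobj_max_brw_get(ioo) now recovers 4 */
}
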
1756 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
1757
1758 /* multiple of 8 bytes => can be used in arrays without padding */
1759 struct niobuf_remote {
1760 __u64 offset;
1761 __u32 len;
1762 __u32 flags;
1763 };
1764
1765 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1766
1767 /* lock value block communicated between the filter and llite */
1768
1769 /* OST_LVB_ERR_INIT is needed because the return code stored in rc is
1770 * negative; without the extra high bit, ((MASK + rc) & MASK) != MASK.
1771 */
1772 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1773 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1774 #define OST_LVB_IS_ERR(blocks) \
1775 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1776 #define OST_LVB_SET_ERR(blocks, rc) \
1777 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1778 #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
1779
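/*
 * Editorial sketch (not part of the original header): encoding a negative
 * return code in lvb_blocks with the macros above; ENOSPC is only an
 * illustrative errno value.
 */
static inline int ost_lvb_err_roundtrip_example(void)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, -ENOSPC);
	if (OST_LVB_IS_ERR(blocks))
		return OST_LVB_GET_ERR(blocks);	/* -ENOSPC again */
	return 0;
}
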
1780 struct ost_lvb_v1 {
1781 __u64 lvb_size;
1782 __s64 lvb_mtime;
1783 __s64 lvb_atime;
1784 __s64 lvb_ctime;
1785 __u64 lvb_blocks;
1786 };
1787
1788 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1789
1790 struct ost_lvb {
1791 __u64 lvb_size;
1792 __s64 lvb_mtime;
1793 __s64 lvb_atime;
1794 __s64 lvb_ctime;
1795 __u64 lvb_blocks;
1796 __u32 lvb_mtime_ns;
1797 __u32 lvb_atime_ns;
1798 __u32 lvb_ctime_ns;
1799 __u32 lvb_padding;
1800 };
1801
1802 void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1803
1804 /*
1805 * lquota data structures
1806 */
1807
1808 /* The lquota_id structure is a union of all the possible identifier types that
1809 * can be used with quota; this includes:
1810 * - 64-bit user ID
1811 * - 64-bit group ID
1812 * - a FID which can be used for per-directory quota in the future
1813 */
1814 union lquota_id {
1815 struct lu_fid qid_fid; /* FID for per-directory quota */
1816 __u64 qid_uid; /* user identifier */
1817 __u64 qid_gid; /* group identifier */
1818 };
1819
1820 /* quotactl management */
1821 struct obd_quotactl {
1822 __u32 qc_cmd;
1823 __u32 qc_type; /* see Q_* flag below */
1824 __u32 qc_id;
1825 __u32 qc_stat;
1826 struct obd_dqinfo qc_dqinfo;
1827 struct obd_dqblk qc_dqblk;
1828 };
1829
1830 void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1831
1832 #define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
1833 #define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
1834 #define Q_GETOINFO 0x800102 /* get obd quota info */
1835 #define Q_GETOQUOTA 0x800103 /* get obd quotas */
1836 #define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
1837
1838 #define Q_COPY(out, in, member) (out)->member = (in)->member
1839
1840 #define QCTL_COPY(out, in) \
1841 do { \
1842 Q_COPY(out, in, qc_cmd); \
1843 Q_COPY(out, in, qc_type); \
1844 Q_COPY(out, in, qc_id); \
1845 Q_COPY(out, in, qc_stat); \
1846 Q_COPY(out, in, qc_dqinfo); \
1847 Q_COPY(out, in, qc_dqblk); \
1848 } while (0)
1849
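/*
 * Editorial sketch (not part of the original header): QCTL_COPY() is a
 * plain member-by-member copy, e.g. for moving a request between two
 * obd_quotactl buffers.
 */
static inline void obd_quotactl_copy_example(struct obd_quotactl *dst,
					     struct obd_quotactl *src)
{
	QCTL_COPY(dst, src);	/* copies qc_cmd through qc_dqblk only */
}
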
1850 /* Data structures associated with the quota locks */
1851
1852 /* Glimpse descriptor used for the index & per-ID quota locks */
1853 struct ldlm_gl_lquota_desc {
1854 union lquota_id gl_id; /* quota ID subject to the glimpse */
1855 __u64 gl_flags; /* see LQUOTA_FL* below */
1856 __u64 gl_ver; /* new index version */
1857 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1858 __u64 gl_softlimit; /* new softlimit */
1859 __u64 gl_time;
1860 __u64 gl_pad2;
1861 };
1862
1863 /* quota glimpse flags */
1864 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1865
1866 /* LVB used with quota (global and per-ID) locks */
1867 struct lquota_lvb {
1868 __u64 lvb_flags; /* see LQUOTA_FL* above */
1869 __u64 lvb_id_may_rel; /* space that might be released later */
1870 __u64 lvb_id_rel; /* space released by the slave for this ID */
1871 __u64 lvb_id_qunit; /* current qunit value */
1872 __u64 lvb_pad1;
1873 };
1874
1875 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
1876
1877 /* op codes */
1878 enum quota_cmd {
1879 QUOTA_DQACQ = 601,
1880 QUOTA_DQREL = 602,
1881 QUOTA_LAST_OPC
1882 };
1883 #define QUOTA_FIRST_OPC QUOTA_DQACQ
1884
1885 /*
1886 * MDS REQ RECORDS
1887 */
1888
1889 /* opcodes */
1890 enum mds_cmd {
1891 MDS_GETATTR = 33,
1892 MDS_GETATTR_NAME = 34,
1893 MDS_CLOSE = 35,
1894 MDS_REINT = 36,
1895 MDS_READPAGE = 37,
1896 MDS_CONNECT = 38,
1897 MDS_DISCONNECT = 39,
1898 MDS_GETSTATUS = 40,
1899 MDS_STATFS = 41,
1900 MDS_PIN = 42,
1901 MDS_UNPIN = 43,
1902 MDS_SYNC = 44,
1903 MDS_DONE_WRITING = 45,
1904 MDS_SET_INFO = 46,
1905 MDS_QUOTACHECK = 47,
1906 MDS_QUOTACTL = 48,
1907 MDS_GETXATTR = 49,
1908 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
1909 MDS_WRITEPAGE = 51,
1910 MDS_IS_SUBDIR = 52,
1911 MDS_GET_INFO = 53,
1912 MDS_HSM_STATE_GET = 54,
1913 MDS_HSM_STATE_SET = 55,
1914 MDS_HSM_ACTION = 56,
1915 MDS_HSM_PROGRESS = 57,
1916 MDS_HSM_REQUEST = 58,
1917 MDS_HSM_CT_REGISTER = 59,
1918 MDS_HSM_CT_UNREGISTER = 60,
1919 MDS_SWAP_LAYOUTS = 61,
1920 MDS_LAST_OPC
1921 };
1922
1923 #define MDS_FIRST_OPC MDS_GETATTR
1924
1925 /*
1926 * Do not exceed 63
1927 */
1928
1929 enum mdt_reint_cmd {
1930 REINT_SETATTR = 1,
1931 REINT_CREATE = 2,
1932 REINT_LINK = 3,
1933 REINT_UNLINK = 4,
1934 REINT_RENAME = 5,
1935 REINT_OPEN = 6,
1936 REINT_SETXATTR = 7,
1937 REINT_RMENTRY = 8,
1938 /* REINT_WRITE = 9, */
1939 REINT_MAX
1940 };
1941
1942 void lustre_swab_generic_32s(__u32 *val);
1943
1944 /* the disposition of the intent outlines what was executed */
1945 #define DISP_IT_EXECD 0x00000001
1946 #define DISP_LOOKUP_EXECD 0x00000002
1947 #define DISP_LOOKUP_NEG 0x00000004
1948 #define DISP_LOOKUP_POS 0x00000008
1949 #define DISP_OPEN_CREATE 0x00000010
1950 #define DISP_OPEN_OPEN 0x00000020
1951 #define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
1952 #define DISP_ENQ_OPEN_REF 0x00800000
1953 #define DISP_ENQ_CREATE_REF 0x01000000
1954 #define DISP_OPEN_LOCK 0x02000000
1955 #define DISP_OPEN_LEASE 0x04000000
1956 #define DISP_OPEN_STRIPE 0x08000000
1957
1958 /* INODE LOCK PARTS */
1959 #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
1960 * was used to protect permission (mode,
1961 * owner, group etc) before 2.4.
1962 */
1963 #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
1964 #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
1965 #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
1966
1967 /* The PERM bit was added in 2.4 and is used to protect permission data (mode,
1968 * owner, group, ACL, etc.), separating it from the LOOKUP lock, because for
1969 * remote directories (in DNE) these locks are granted by different MDTs
1970 * (different ldlm namespaces).
1971 *
1972 * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
1973 * For a remote directory, the master MDT, where the remote directory is, grants
1974 * UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
1975 * grants LOOKUP_LOCK.
1976 */
1977 #define MDS_INODELOCK_PERM 0x000010
1978 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
1979
1980 #define MDS_INODELOCK_MAXSHIFT 5
1981 /* This FULL lock is useful to take on unlink sort of operations */
1982 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
1983
1984 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
1985 * but was moved into name[1] along with the OID to avoid consuming the
1986 * name[2,3] fields that need to be used for the quota id (also a FID).
1987 */
1988 enum {
1989 LUSTRE_RES_ID_SEQ_OFF = 0,
1990 LUSTRE_RES_ID_VER_OID_OFF = 1,
1991 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
1992 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
1993 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
1994 LUSTRE_RES_ID_HSH_OFF = 3
1995 };
1996
1997 #define MDS_STATUS_CONN 1
1998 #define MDS_STATUS_LOV 2
1999
2000 /* mdt_thread_info.mti_flags. */
2001 enum md_op_flags {
2002 /* This flag indicates that Size-on-MDS attributes have changed. */
2003 MF_SOM_CHANGE = (1 << 0),
2004 /* Flags indicating an epoch open or close. */
2005 MF_EPOCH_OPEN = (1 << 1),
2006 MF_EPOCH_CLOSE = (1 << 2),
2007 MF_MDC_CANCEL_FID1 = (1 << 3),
2008 MF_MDC_CANCEL_FID2 = (1 << 4),
2009 MF_MDC_CANCEL_FID3 = (1 << 5),
2010 MF_MDC_CANCEL_FID4 = (1 << 6),
2011 /* There is a pending attribute update. */
2012 MF_SOM_AU = (1 << 7),
2013 /* Cancel OST locks while getting OST attributes. */
2014 MF_GETATTR_LOCK = (1 << 8),
2015 MF_GET_MDT_IDX = (1 << 9),
2016 };
2017
2018 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2019
2020 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2021
2022 /* These should be identical to their EXT4_*_FL counterparts; they are
2023 * redefined here only to avoid dragging in fs/ext4/ext4.h.
2024 */
2025 #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2026 #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2027 #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2028 #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2029 #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2030
2031 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2032 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2033 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2034 * the S_* flags are kernel-internal values that change between kernel
2035 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2036 * See b=16526 for a full history.
2037 */
2038 static inline int ll_ext_to_inode_flags(int flags)
2039 {
2040 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
2041 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
2042 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
2043 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
2044 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2045 }
2046
2047 static inline int ll_inode_to_ext_flags(int iflags)
2048 {
2049 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
2050 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
2051 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
2052 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
2053 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
2054 }
2055
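/*
 * Editorial sketch (not part of the original header): the two helpers above
 * are inverses on the five flags they know about, so a wire -> VFS -> wire
 * round trip preserves them.
 */
static inline int lustre_flags_roundtrip_example(void)
{
	int wire = LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL;

	/* yields LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL again */
	return ll_inode_to_ext_flags(ll_ext_to_inode_flags(wire));
}
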
2056 /* 64 possible states */
2057 enum md_transient_state {
2058 MS_RESTORE = (1 << 0), /* restore is running */
2059 };
2060
2061 struct mdt_body {
2062 struct lu_fid fid1;
2063 struct lu_fid fid2;
2064 struct lustre_handle handle;
2065 __u64 valid;
2066 __u64 size; /* Offset, in the case of MDS_READPAGE */
2067 __s64 mtime;
2068 __s64 atime;
2069 __s64 ctime;
2070 __u64 blocks; /* XID, in the case of MDS_READPAGE */
2071 __u64 ioepoch;
2072 __u64 t_state; /* transient file state defined in
2073 * enum md_transient_state
2074 * was "ino" until 2.4.0
2075 */
2076 __u32 fsuid;
2077 __u32 fsgid;
2078 __u32 capability;
2079 __u32 mode;
2080 __u32 uid;
2081 __u32 gid;
2082 __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
2083 __u32 rdev;
2084 __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
2085 __u32 unused2; /* was "generation" until 2.4.0 */
2086 __u32 suppgid;
2087 __u32 eadatasize;
2088 __u32 aclsize;
2089 __u32 max_mdsize;
2090 __u32 max_cookiesize;
2091 __u32 uid_h; /* high 32-bits of uid, for FUID */
2092 __u32 gid_h; /* high 32-bits of gid, for FUID */
2093 __u32 padding_5; /* also fix lustre_swab_mdt_body */
2094 __u64 padding_6;
2095 __u64 padding_7;
2096 __u64 padding_8;
2097 __u64 padding_9;
2098 __u64 padding_10;
2099 }; /* 216 */
2100
2101 void lustre_swab_mdt_body(struct mdt_body *b);
2102
2103 struct mdt_ioepoch {
2104 struct lustre_handle handle;
2105 __u64 ioepoch;
2106 __u32 flags;
2107 __u32 padding;
2108 };
2109
2110 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
2111
2112 /* permissions for md_perm.mp_perm */
2113 enum {
2114 CFS_SETUID_PERM = 0x01,
2115 CFS_SETGID_PERM = 0x02,
2116 CFS_SETGRP_PERM = 0x04,
2117 CFS_RMTACL_PERM = 0x08,
2118 CFS_RMTOWN_PERM = 0x10
2119 };
2120
2121 /* Inode access permissions for a remote user; the inode info is omitted
2122 * because the client already knows it.
2123 */
2124 struct mdt_remote_perm {
2125 __u32 rp_uid;
2126 __u32 rp_gid;
2127 __u32 rp_fsuid;
2128 __u32 rp_fsuid_h;
2129 __u32 rp_fsgid;
2130 __u32 rp_fsgid_h;
2131 __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
2132 __u32 rp_padding;
2133 };
2134
2135 void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2136
2137 struct mdt_rec_setattr {
2138 __u32 sa_opcode;
2139 __u32 sa_cap;
2140 __u32 sa_fsuid;
2141 __u32 sa_fsuid_h;
2142 __u32 sa_fsgid;
2143 __u32 sa_fsgid_h;
2144 __u32 sa_suppgid;
2145 __u32 sa_suppgid_h;
2146 __u32 sa_padding_1;
2147 __u32 sa_padding_1_h;
2148 struct lu_fid sa_fid;
2149 __u64 sa_valid;
2150 __u32 sa_uid;
2151 __u32 sa_gid;
2152 __u64 sa_size;
2153 __u64 sa_blocks;
2154 __s64 sa_mtime;
2155 __s64 sa_atime;
2156 __s64 sa_ctime;
2157 __u32 sa_attr_flags;
2158 __u32 sa_mode;
2159 __u32 sa_bias; /* some operation flags */
2160 __u32 sa_padding_3;
2161 __u32 sa_padding_4;
2162 __u32 sa_padding_5;
2163 };
2164
2165 void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
2166
2167 /*
2168 * Attribute flags used in mdt_rec_setattr::sa_valid.
2169 * The kernel's #defines for ATTR_* should not be used over the network
2170 * since the client and MDS may run different kernels (see bug 13828)
2171 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2172 */
2173 #define MDS_ATTR_MODE 0x1ULL /* = 1 */
2174 #define MDS_ATTR_UID 0x2ULL /* = 2 */
2175 #define MDS_ATTR_GID 0x4ULL /* = 4 */
2176 #define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2177 #define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2178 #define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2179 #define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2180 #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2181 #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2182 #define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change itself, but forces the change */
2183 #define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2184 #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2185 #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2186 #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2187 #define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path,
2188 * ie O_TRUNC
2189 */
2190 #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2191
2192 #define MDS_FMODE_CLOSED 00000000
2193 #define MDS_FMODE_EXEC 00000004
2194 /* IO Epoch is opened on a closed file. */
2195 #define MDS_FMODE_EPOCH 01000000
2196 /* IO Epoch is opened on a file truncate. */
2197 #define MDS_FMODE_TRUNC 02000000
2198 /* Size-on-MDS Attribute Update is pending. */
2199 #define MDS_FMODE_SOM 04000000
2200
2201 #define MDS_OPEN_CREATED 00000010
2202 #define MDS_OPEN_CROSS 00000020
2203
2204 #define MDS_OPEN_CREAT 00000100
2205 #define MDS_OPEN_EXCL 00000200
2206 #define MDS_OPEN_TRUNC 00001000
2207 #define MDS_OPEN_APPEND 00002000
2208 #define MDS_OPEN_SYNC 00010000
2209 #define MDS_OPEN_DIRECTORY 00200000
2210
2211 #define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2212 #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2213 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2214 #define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
2215 * JOIN FILE is no longer
2216 * supported; this flag is
2217 * reserved only to prevent
2218 * the bit from being reused.
2219 */
2220
2221 #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2222 #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2223 #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
2224 #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2225 #define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe
2226 * or HSM restore) */
2227 #define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile, i.e.
2228 * created unlinked */
2229 #define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
2230 * delegation, succeed if it's not
2231 * being opened with conflict mode.
2232 */
2233 #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
2234
2235 enum mds_op_bias {
2236 MDS_CHECK_SPLIT = 1 << 0,
2237 MDS_CROSS_REF = 1 << 1,
2238 MDS_VTX_BYPASS = 1 << 2,
2239 MDS_PERM_BYPASS = 1 << 3,
2240 MDS_SOM = 1 << 4,
2241 MDS_QUOTA_IGNORE = 1 << 5,
2242 MDS_CLOSE_CLEANUP = 1 << 6,
2243 MDS_KEEP_ORPHAN = 1 << 7,
2244 MDS_RECOV_OPEN = 1 << 8,
2245 MDS_DATA_MODIFIED = 1 << 9,
2246 MDS_CREATE_VOLATILE = 1 << 10,
2247 MDS_OWNEROVERRIDE = 1 << 11,
2248 MDS_HSM_RELEASE = 1 << 12,
2249 };
2250
2251 /* instance of mdt_reint_rec */
2252 struct mdt_rec_create {
2253 __u32 cr_opcode;
2254 __u32 cr_cap;
2255 __u32 cr_fsuid;
2256 __u32 cr_fsuid_h;
2257 __u32 cr_fsgid;
2258 __u32 cr_fsgid_h;
2259 __u32 cr_suppgid1;
2260 __u32 cr_suppgid1_h;
2261 __u32 cr_suppgid2;
2262 __u32 cr_suppgid2_h;
2263 struct lu_fid cr_fid1;
2264 struct lu_fid cr_fid2;
2265 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2266 __s64 cr_time;
2267 __u64 cr_rdev;
2268 __u64 cr_ioepoch;
2269 __u64 cr_padding_1; /* rr_blocks */
2270 __u32 cr_mode;
2271 __u32 cr_bias;
2272 /* use of helpers set/get_mrc_cr_flags() is needed to access
2273 * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
2274 * extend cr_flags size without breaking 1.8 compat
2275 */
2276 __u32 cr_flags_l; /* for use with open, low 32 bits */
2277 __u32 cr_flags_h; /* for use with open, high 32 bits */
2278 __u32 cr_umask; /* umask for create */
2279 __u32 cr_padding_4; /* rr_padding_4 */
2280 };
2281
2282 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2283 {
2284 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
2285 mrc->cr_flags_h = (__u32)(flags >> 32);
2286 }
2287
2288 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2289 {
2290 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2291 }
2292
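/*
 * Editorial sketch (not part of the original header): MDS_OPEN_LEASE from
 * the open-flag list above does not fit in 32 bits, which is exactly what
 * the split accessors handle.
 */
static inline __u64 mrc_cr_flags_roundtrip_example(struct mdt_rec_create *mrc)
{
	set_mrc_cr_flags(mrc, MDS_OPEN_LOCK | MDS_OPEN_LEASE);
	return get_mrc_cr_flags(mrc);	/* both flags back, high word intact */
}
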
2293 /* instance of mdt_reint_rec */
2294 struct mdt_rec_link {
2295 __u32 lk_opcode;
2296 __u32 lk_cap;
2297 __u32 lk_fsuid;
2298 __u32 lk_fsuid_h;
2299 __u32 lk_fsgid;
2300 __u32 lk_fsgid_h;
2301 __u32 lk_suppgid1;
2302 __u32 lk_suppgid1_h;
2303 __u32 lk_suppgid2;
2304 __u32 lk_suppgid2_h;
2305 struct lu_fid lk_fid1;
2306 struct lu_fid lk_fid2;
2307 __s64 lk_time;
2308 __u64 lk_padding_1; /* rr_atime */
2309 __u64 lk_padding_2; /* rr_ctime */
2310 __u64 lk_padding_3; /* rr_size */
2311 __u64 lk_padding_4; /* rr_blocks */
2312 __u32 lk_bias;
2313 __u32 lk_padding_5; /* rr_mode */
2314 __u32 lk_padding_6; /* rr_flags */
2315 __u32 lk_padding_7; /* rr_padding_2 */
2316 __u32 lk_padding_8; /* rr_padding_3 */
2317 __u32 lk_padding_9; /* rr_padding_4 */
2318 };
2319
2320 /* instance of mdt_reint_rec */
2321 struct mdt_rec_unlink {
2322 __u32 ul_opcode;
2323 __u32 ul_cap;
2324 __u32 ul_fsuid;
2325 __u32 ul_fsuid_h;
2326 __u32 ul_fsgid;
2327 __u32 ul_fsgid_h;
2328 __u32 ul_suppgid1;
2329 __u32 ul_suppgid1_h;
2330 __u32 ul_suppgid2;
2331 __u32 ul_suppgid2_h;
2332 struct lu_fid ul_fid1;
2333 struct lu_fid ul_fid2;
2334 __s64 ul_time;
2335 __u64 ul_padding_2; /* rr_atime */
2336 __u64 ul_padding_3; /* rr_ctime */
2337 __u64 ul_padding_4; /* rr_size */
2338 __u64 ul_padding_5; /* rr_blocks */
2339 __u32 ul_bias;
2340 __u32 ul_mode;
2341 __u32 ul_padding_6; /* rr_flags */
2342 __u32 ul_padding_7; /* rr_padding_2 */
2343 __u32 ul_padding_8; /* rr_padding_3 */
2344 __u32 ul_padding_9; /* rr_padding_4 */
2345 };
2346
2347 /* instance of mdt_reint_rec */
2348 struct mdt_rec_rename {
2349 __u32 rn_opcode;
2350 __u32 rn_cap;
2351 __u32 rn_fsuid;
2352 __u32 rn_fsuid_h;
2353 __u32 rn_fsgid;
2354 __u32 rn_fsgid_h;
2355 __u32 rn_suppgid1;
2356 __u32 rn_suppgid1_h;
2357 __u32 rn_suppgid2;
2358 __u32 rn_suppgid2_h;
2359 struct lu_fid rn_fid1;
2360 struct lu_fid rn_fid2;
2361 __s64 rn_time;
2362 __u64 rn_padding_1; /* rr_atime */
2363 __u64 rn_padding_2; /* rr_ctime */
2364 __u64 rn_padding_3; /* rr_size */
2365 __u64 rn_padding_4; /* rr_blocks */
2366 __u32 rn_bias; /* some operation flags */
2367 __u32 rn_mode; /* cross-ref rename has mode */
2368 __u32 rn_padding_5; /* rr_flags */
2369 __u32 rn_padding_6; /* rr_padding_2 */
2370 __u32 rn_padding_7; /* rr_padding_3 */
2371 __u32 rn_padding_8; /* rr_padding_4 */
2372 };
2373
2374 /* instance of mdt_reint_rec */
2375 struct mdt_rec_setxattr {
2376 __u32 sx_opcode;
2377 __u32 sx_cap;
2378 __u32 sx_fsuid;
2379 __u32 sx_fsuid_h;
2380 __u32 sx_fsgid;
2381 __u32 sx_fsgid_h;
2382 __u32 sx_suppgid1;
2383 __u32 sx_suppgid1_h;
2384 __u32 sx_suppgid2;
2385 __u32 sx_suppgid2_h;
2386 struct lu_fid sx_fid;
2387 __u64 sx_padding_1; /* These three are rr_fid2 */
2388 __u32 sx_padding_2;
2389 __u32 sx_padding_3;
2390 __u64 sx_valid;
2391 __s64 sx_time;
2392 __u64 sx_padding_5; /* rr_ctime */
2393 __u64 sx_padding_6; /* rr_size */
2394 __u64 sx_padding_7; /* rr_blocks */
2395 __u32 sx_size;
2396 __u32 sx_flags;
2397 __u32 sx_padding_8; /* rr_flags */
2398 __u32 sx_padding_9; /* rr_padding_2 */
2399 __u32 sx_padding_10; /* rr_padding_3 */
2400 __u32 sx_padding_11; /* rr_padding_4 */
2401 };
2402
2403 /*
2404 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2405 * Do NOT change the size of any member; otherwise the values will be
2406 * swabbed incorrectly in lustre_swab_mdt_rec_reint().
2407 *
2408 * If you add new members in other mdt_reint_xxx structures and need to use the
2409 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2410 */
2411 struct mdt_rec_reint {
2412 __u32 rr_opcode;
2413 __u32 rr_cap;
2414 __u32 rr_fsuid;
2415 __u32 rr_fsuid_h;
2416 __u32 rr_fsgid;
2417 __u32 rr_fsgid_h;
2418 __u32 rr_suppgid1;
2419 __u32 rr_suppgid1_h;
2420 __u32 rr_suppgid2;
2421 __u32 rr_suppgid2_h;
2422 struct lu_fid rr_fid1;
2423 struct lu_fid rr_fid2;
2424 __s64 rr_mtime;
2425 __s64 rr_atime;
2426 __s64 rr_ctime;
2427 __u64 rr_size;
2428 __u64 rr_blocks;
2429 __u32 rr_bias;
2430 __u32 rr_mode;
2431 __u32 rr_flags;
2432 __u32 rr_flags_h;
2433 __u32 rr_umask;
2434 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2435 };
2436
2437 void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2438
2439 struct lmv_desc {
2440 __u32 ld_tgt_count; /* how many MDS's */
2441 __u32 ld_active_tgt_count; /* how many active */
2442 __u32 ld_default_stripe_count; /* how many objects are used */
2443 __u32 ld_pattern; /* default MEA_MAGIC_* */
2444 __u64 ld_default_hash_size;
2445 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2446 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
2447 __u32 ld_qos_maxage; /* in seconds */
2448 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2449 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2450 struct obd_uuid ld_uuid;
2451 };
2452
2453 /* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
2454 struct lmv_stripe_md {
2455 __u32 mea_magic;
2456 __u32 mea_count;
2457 __u32 mea_master;
2458 __u32 mea_padding;
2459 char mea_pool_name[LOV_MAXPOOLNAME];
2460 struct lu_fid mea_ids[0];
2461 };
2462
2463 /* lmv structures */
2464 #define MEA_MAGIC_LAST_CHAR 0xb2221ca1
2465 #define MEA_MAGIC_ALL_CHARS 0xb222a11c
2466 #define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
2467
2468 #define MAX_HASH_SIZE_32 0x7fffffffUL
2469 #define MAX_HASH_SIZE 0x7fffffffffffffffULL
2470 #define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
2471
2472 enum fld_rpc_opc {
2473 FLD_QUERY = 900,
2474 FLD_LAST_OPC,
2475 FLD_FIRST_OPC = FLD_QUERY
2476 };
2477
2478 enum seq_rpc_opc {
2479 SEQ_QUERY = 700,
2480 SEQ_LAST_OPC,
2481 SEQ_FIRST_OPC = SEQ_QUERY
2482 };
2483
2484 enum seq_op {
2485 SEQ_ALLOC_SUPER = 0,
2486 SEQ_ALLOC_META = 1
2487 };
2488
2489 /*
2490 * LOV data structures
2491 */
2492
2493 #define LOV_MAX_UUID_BUFFER_SIZE 8192
2494 /* The size of the buffer the lov/mdc reserves for the
2495 * array of UUIDs returned by the MDS. With the current
2496 * protocol, this will limit the max number of OSTs per LOV
2497 */
2498
2499 #define LOV_DESC_MAGIC 0xB0CCDE5C
2500 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2501 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2502
2503 /* LOV settings descriptor (should only contain static info) */
2504 struct lov_desc {
2505 __u32 ld_tgt_count; /* how many OBD's */
2506 __u32 ld_active_tgt_count; /* how many active */
2507 __u32 ld_default_stripe_count; /* how many objects are used */
2508 __u32 ld_pattern; /* default PATTERN_RAID0 */
2509 __u64 ld_default_stripe_size; /* in bytes */
2510 __u64 ld_default_stripe_offset; /* in bytes */
2511 __u32 ld_padding_0; /* unused */
2512 __u32 ld_qos_maxage; /* in seconds */
2513 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2514 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2515 struct obd_uuid ld_uuid;
2516 };
2517
2518 #define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2519
2520 void lustre_swab_lov_desc(struct lov_desc *ld);
2521
2522 /*
2523 * LDLM requests:
2524 */
2525 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2526 enum ldlm_cmd {
2527 LDLM_ENQUEUE = 101,
2528 LDLM_CONVERT = 102,
2529 LDLM_CANCEL = 103,
2530 LDLM_BL_CALLBACK = 104,
2531 LDLM_CP_CALLBACK = 105,
2532 LDLM_GL_CALLBACK = 106,
2533 LDLM_SET_INFO = 107,
2534 LDLM_LAST_OPC
2535 };
2536 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2537
2538 #define RES_NAME_SIZE 4
2539 struct ldlm_res_id {
2540 __u64 name[RES_NAME_SIZE];
2541 };
2542
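/*
 * Editorial sketch (not part of the original header, and assuming the
 * fid_seq()/fid_ver_oid() helpers defined earlier in this file): packing a
 * FID into a resource name per the LUSTRE_RES_ID_*_OFF offsets above, with
 * version and OID sharing name[1].
 */
static inline void ldlm_res_id_from_fid_example(const struct lu_fid *fid,
						struct ldlm_res_id *name)
{
	memset(name, 0, sizeof(*name));
	name->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
	name->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
}
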
2543 #define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
2544 #define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
2545 (res)->lr_name.name[2], (res)->lr_name.name[3]
2546
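/*
 * Editorial sketch (not part of the original header): DLDLMRES pairs with
 * any printf-style sink. PLDLMRES() expects a struct ldlm_resource (it
 * dereferences lr_name), so a bare ldlm_res_id is printed field by field
 * here; buf/len are hypothetical.
 */
static inline void ldlm_res_id_print_example(const struct ldlm_res_id *res,
					     char *buf, size_t len)
{
	snprintf(buf, len, DLDLMRES,
		 res->name[0], res->name[1], res->name[2], res->name[3]);
}
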
2547 static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
2548 const struct ldlm_res_id *res1)
2549 {
2550 return !memcmp(res0, res1, sizeof(*res0));
2551 }
2552
2553 /* lock types */
2554 enum ldlm_mode {
2555 LCK_MINMODE = 0,
2556 LCK_EX = 1,
2557 LCK_PW = 2,
2558 LCK_PR = 4,
2559 LCK_CW = 8,
2560 LCK_CR = 16,
2561 LCK_NL = 32,
2562 LCK_GROUP = 64,
2563 LCK_COS = 128,
2564 LCK_MAXMODE
2565 };
2566
2567 #define LCK_MODE_NUM 8
2568
2569 enum ldlm_type {
2570 LDLM_PLAIN = 10,
2571 LDLM_EXTENT = 11,
2572 LDLM_FLOCK = 12,
2573 LDLM_IBITS = 13,
2574 LDLM_MAX_TYPE
2575 };
2576
2577 #define LDLM_MIN_TYPE LDLM_PLAIN
2578
2579 struct ldlm_extent {
2580 __u64 start;
2581 __u64 end;
2582 __u64 gid;
2583 };
2584
2585 static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
2586 struct ldlm_extent *ex2)
2587 {
2588 return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2589 }
2590
2591 /* check if @ex1 contains @ex2 */
2592 static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
2593 struct ldlm_extent *ex2)
2594 {
2595 return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
2596 }
2597
2598 struct ldlm_inodebits {
2599 __u64 bits;
2600 };
2601
2602 struct ldlm_flock_wire {
2603 __u64 lfw_start;
2604 __u64 lfw_end;
2605 __u64 lfw_owner;
2606 __u32 lfw_padding;
2607 __u32 lfw_pid;
2608 };
2609
2610 /* It's important that the fields of the ldlm_extent structure match
2611 * the first fields of the ldlm_flock_wire structure because there is only
2612 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2613 * this ever changes we will need to swab the union differently based
2614 * on the resource type.
2615 */
2616
2617 typedef union {
2618 struct ldlm_extent l_extent;
2619 struct ldlm_flock_wire l_flock;
2620 struct ldlm_inodebits l_inodebits;
2621 } ldlm_wire_policy_data_t;
2622
2623 union ldlm_gl_desc {
2624 struct ldlm_gl_lquota_desc lquota_desc;
2625 };
2626
2627 void lustre_swab_gl_desc(union ldlm_gl_desc *);
2628
2629 struct ldlm_intent {
2630 __u64 opc;
2631 };
2632
2633 void lustre_swab_ldlm_intent(struct ldlm_intent *i);
2634
2635 struct ldlm_resource_desc {
2636 enum ldlm_type lr_type;
2637 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
2638 struct ldlm_res_id lr_name;
2639 };
2640
2641 struct ldlm_lock_desc {
2642 struct ldlm_resource_desc l_resource;
2643 enum ldlm_mode l_req_mode;
2644 enum ldlm_mode l_granted_mode;
2645 ldlm_wire_policy_data_t l_policy_data;
2646 };
2647
2648 #define LDLM_LOCKREQ_HANDLES 2
2649 #define LDLM_ENQUEUE_CANCEL_OFF 1
2650
2651 struct ldlm_request {
2652 __u32 lock_flags;
2653 __u32 lock_count;
2654 struct ldlm_lock_desc lock_desc;
2655 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2656 };
2657
2658 void lustre_swab_ldlm_request(struct ldlm_request *rq);
2659
2660 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2661 * Otherwise, 2 are available.
2662 */
2663 #define ldlm_request_bufsize(count, type) \
2664 ({ \
2665 int _avail = LDLM_LOCKREQ_HANDLES; \
2666 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2667 sizeof(struct ldlm_request) + \
2668 (count > _avail ? count - _avail : 0) * \
2669 sizeof(struct lustre_handle); \
2670 })
2671
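/*
 * Editorial sketch (not part of the original header): an enqueue carrying
 * five cancel handles reserves one of the two embedded slots, so four
 * extra handles are appended after the request body.
 */
static inline int ldlm_request_bufsize_example(void)
{
	/* sizeof(struct ldlm_request) + 4 * sizeof(struct lustre_handle) */
	return ldlm_request_bufsize(5, LDLM_ENQUEUE);
}
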
2672 struct ldlm_reply {
2673 __u32 lock_flags;
2674 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2675 struct ldlm_lock_desc lock_desc;
2676 struct lustre_handle lock_handle;
2677 __u64 lock_policy_res1;
2678 __u64 lock_policy_res2;
2679 };
2680
2681 void lustre_swab_ldlm_reply(struct ldlm_reply *r);
2682
2683 #define ldlm_flags_to_wire(flags) ((__u32)(flags))
2684 #define ldlm_flags_from_wire(flags) ((__u64)(flags))
2685
2686 /*
2687 * Opcodes for mountconf (mgs and mgc)
2688 */
2689 enum mgs_cmd {
2690 MGS_CONNECT = 250,
2691 MGS_DISCONNECT,
2692 MGS_EXCEPTION, /* node died, etc. */
2693 MGS_TARGET_REG, /* whenever target starts up */
2694 MGS_TARGET_DEL,
2695 MGS_SET_INFO,
2696 MGS_CONFIG_READ,
2697 MGS_LAST_OPC
2698 };
2699 #define MGS_FIRST_OPC MGS_CONNECT
2700
2701 #define MGS_PARAM_MAXLEN 1024
2702 #define KEY_SET_INFO "set_info"
2703
2704 struct mgs_send_param {
2705 char mgs_param[MGS_PARAM_MAXLEN];
2706 };
2707
2708 /* We pass this info to the MGS so it can write config logs */
2709 #define MTI_NAME_MAXLEN 64
2710 #define MTI_PARAM_MAXLEN 4096
2711 #define MTI_NIDS_MAX 32
2712 struct mgs_target_info {
2713 __u32 mti_lustre_ver;
2714 __u32 mti_stripe_index;
2715 __u32 mti_config_ver;
2716 __u32 mti_flags;
2717 __u32 mti_nid_count;
2718 __u32 mti_instance; /* Running instance of target */
2719 char mti_fsname[MTI_NAME_MAXLEN];
2720 char mti_svname[MTI_NAME_MAXLEN];
2721 char mti_uuid[sizeof(struct obd_uuid)];
2722 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
2723 char mti_params[MTI_PARAM_MAXLEN];
2724 };
2725
2726 void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2727
2728 struct mgs_nidtbl_entry {
2729 __u64 mne_version; /* table version of this entry */
2730 __u32 mne_instance; /* target instance # */
2731 __u32 mne_index; /* target index */
2732 __u32 mne_length; /* length of this entry, in bytes */
2733 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
2734 __u8 mne_nid_type; /* type of NID (must be zero); reserved for IPv6 */
2735 __u8 mne_nid_size; /* size of each NID, in bytes */
2736 __u8 mne_nid_count; /* # of NIDs in buffer */
2737 union {
2738 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
2739 } u;
2740 };
2741
2742 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2743
2744 struct mgs_config_body {
2745 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2746 __u64 mcb_offset; /* next index of config log to request */
2747 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2748 __u8 mcb_reserved;
2749 __u8 mcb_bits; /* bits unit size of config log */
2750 __u32 mcb_units; /* # of units for bulk transfer */
2751 };
2752
2753 void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2754
2755 struct mgs_config_res {
2756 __u64 mcr_offset; /* index of last config log */
2757 __u64 mcr_size; /* size of the log */
2758 };
2759
2760 void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2761
2762 /* Config marker flags (in config log) */
2763 #define CM_START 0x01
2764 #define CM_END 0x02
2765 #define CM_SKIP 0x04
2766 #define CM_UPGRADE146 0x08
2767 #define CM_EXCLUDE 0x10
2768 #define CM_START_SKIP (CM_START | CM_SKIP)
2769
2770 struct cfg_marker {
2771 __u32 cm_step; /* aka config version */
2772 __u32 cm_flags;
2773 __u32 cm_vers; /* lustre release version number */
2774 __u32 cm_padding; /* 64 bit align */
2775 __s64 cm_createtime; /* when this record was first created */
2776 __s64 cm_canceltime; /* when this record is no longer valid */
2777 char cm_tgtname[MTI_NAME_MAXLEN];
2778 char cm_comment[MTI_NAME_MAXLEN];
2779 };
2780
2781 void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
2782
2783 /*
2784 * Opcodes for multiple servers.
2785 */
2786
2787 enum obd_cmd {
2788 OBD_PING = 400,
2789 OBD_LOG_CANCEL,
2790 OBD_QC_CALLBACK,
2791 OBD_IDX_READ,
2792 OBD_LAST_OPC
2793 };
2794 #define OBD_FIRST_OPC OBD_PING
2795
2796 /* catalog of log objects */
2797
2798 /** Identifier for a single log object */
2799 struct llog_logid {
2800 struct ost_id lgl_oi;
2801 __u32 lgl_ogen;
2802 } __packed;
2803
2804 /** Records written to the CATALOGS list */
2805 #define CATLIST "CATALOGS"
2806 struct llog_catid {
2807 struct llog_logid lci_logid;
2808 __u32 lci_padding1;
2809 __u32 lci_padding2;
2810 __u32 lci_padding3;
2811 } __packed;
2812
2813 /* Log data record types - there is no specific reason that these need to
2814 * be related to the RPC opcodes, but no reason not to (may be handy later?)
2815 */
2816 #define LLOG_OP_MAGIC 0x10600000
2817 #define LLOG_OP_MASK 0xfff00000
2818
2819 enum llog_op_type {
2820 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
2821 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
2822 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
2823 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2824 REINT_UNLINK, /* obsolete after 2.5.0 */
2825 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2826 REINT_UNLINK,
2827 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2828 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2829 REINT_SETATTR,
2830 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
2831 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2832 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
2833 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
2834 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
2835 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
2836 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
2837 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
2838 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
2839 };
2840
2841 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2842 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
2843
2844 /** Log record header - stored in little endian order.
2845 * Each record must start with this struct, end with a llog_rec_tail,
2846 * and be a multiple of 256 bits in size.
2847 */
2848 struct llog_rec_hdr {
2849 __u32 lrh_len;
2850 __u32 lrh_index;
2851 __u32 lrh_type;
2852 __u32 lrh_id;
2853 };
2854
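/*
 * Editorial sketch (not part of the original header): because records are
 * stored little-endian, a big-endian reader sees the LLOG_OP_MAGIC bits of
 * lrh_type byte-swapped, which is all this check needs.
 */
static inline int llog_rec_needs_swab_example(const struct llog_rec_hdr *rec)
{
	return LLOG_REC_HDR_NEEDS_SWABBING(rec);	/* non-zero => swab it */
}
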
2855 struct llog_rec_tail {
2856 __u32 lrt_len;
2857 __u32 lrt_index;
2858 };
2859
2860 /* Payload data follows just after the header */
2861 #define REC_DATA(ptr) \
2862 ((void *)((char *)(ptr) + sizeof(struct llog_rec_hdr)))
2863 
2864 #define REC_DATA_LEN(rec) \
2865 ((rec)->lrh_len - sizeof(struct llog_rec_hdr) - \
2866 sizeof(struct llog_rec_tail))
2867
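/*
 * Editorial sketch (not part of the original header): locating the payload
 * of a record between its header and its llog_rec_tail.
 */
static inline void *llog_rec_payload_example(struct llog_rec_hdr *rec,
					     __u32 *len)
{
	*len = REC_DATA_LEN(rec);	/* lrh_len minus header and tail */
	return REC_DATA(rec);
}
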
2868 struct llog_logid_rec {
2869 struct llog_rec_hdr lid_hdr;
2870 struct llog_logid lid_id;
2871 __u32 lid_padding1;
2872 __u64 lid_padding2;
2873 __u64 lid_padding3;
2874 struct llog_rec_tail lid_tail;
2875 } __packed;
2876
2877 struct llog_unlink_rec {
2878 struct llog_rec_hdr lur_hdr;
2879 __u64 lur_oid;
2880 __u32 lur_oseq;
2881 __u32 lur_count;
2882 struct llog_rec_tail lur_tail;
2883 } __packed;
2884
2885 struct llog_unlink64_rec {
2886 struct llog_rec_hdr lur_hdr;
2887 struct lu_fid lur_fid;
2888 __u32 lur_count; /* to destroy the lost precreated objects */
2889 __u32 lur_padding1;
2890 __u64 lur_padding2;
2891 __u64 lur_padding3;
2892 struct llog_rec_tail lur_tail;
2893 } __packed;
2894
2895 struct llog_setattr64_rec {
2896 struct llog_rec_hdr lsr_hdr;
2897 struct ost_id lsr_oi;
2898 __u32 lsr_uid;
2899 __u32 lsr_uid_h;
2900 __u32 lsr_gid;
2901 __u32 lsr_gid_h;
2902 __u64 lsr_padding;
2903 struct llog_rec_tail lsr_tail;
2904 } __packed;
2905
2906 struct llog_size_change_rec {
2907 struct llog_rec_hdr lsc_hdr;
2908 struct ll_fid lsc_fid;
2909 __u32 lsc_ioepoch;
2910 __u32 lsc_padding1;
2911 __u64 lsc_padding2;
2912 __u64 lsc_padding3;
2913 struct llog_rec_tail lsc_tail;
2914 } __packed;
2915
2916 /* changelog llog name, needed by client replicators */
2917 #define CHANGELOG_CATALOG "changelog_catalog"
2918
2919 struct changelog_setinfo {
2920 __u64 cs_recno;
2921 __u32 cs_id;
2922 } __packed;
2923
2924 /** changelog record */
2925 struct llog_changelog_rec {
2926 struct llog_rec_hdr cr_hdr;
2927 struct changelog_rec cr;
2928 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
2929 } __packed;
2930
2931 struct llog_changelog_ext_rec {
2932 struct llog_rec_hdr cr_hdr;
2933 struct changelog_ext_rec cr;
2934 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
2935 } __packed;
2936
2937 struct llog_changelog_user_rec {
2938 struct llog_rec_hdr cur_hdr;
2939 __u32 cur_id;
2940 __u32 cur_padding;
2941 __u64 cur_endrec;
2942 struct llog_rec_tail cur_tail;
2943 } __packed;
2944
2945 enum agent_req_status {
2946 ARS_WAITING,
2947 ARS_STARTED,
2948 ARS_FAILED,
2949 ARS_CANCELED,
2950 ARS_SUCCEED,
2951 };
2952
2953 static inline char *agent_req_status2name(enum agent_req_status ars)
2954 {
2955 switch (ars) {
2956 case ARS_WAITING:
2957 return "WAITING";
2958 case ARS_STARTED:
2959 return "STARTED";
2960 case ARS_FAILED:
2961 return "FAILED";
2962 case ARS_CANCELED:
2963 return "CANCELED";
2964 case ARS_SUCCEED:
2965 return "SUCCEED";
2966 default:
2967 return "UNKNOWN";
2968 }
2969 }
2970
2971 static inline bool agent_req_in_final_state(enum agent_req_status ars)
2972 {
2973 return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
2974 (ars == ARS_CANCELED));
2975 }
2976
2977 struct llog_agent_req_rec {
2978 struct llog_rec_hdr arr_hdr; /**< record header */
2979 __u32 arr_status; /**< status of the request */
2980 /* must match enum
2981 * agent_req_status
2982 */
2983 __u32 arr_archive_id; /**< backend archive number */
2984 __u64 arr_flags; /**< req flags */
2985 __u64 arr_compound_id;/**< compound cookie */
2986 __u64 arr_req_create; /**< req. creation time */
2987 __u64 arr_req_change; /**< req. status change time */
2988 struct hsm_action_item arr_hai; /**< req. to the agent */
2989 struct llog_rec_tail arr_tail; /**< record tail, for_sizeof_only */
2990 } __packed;
2991
2992 /* Old llog gen for compatibility */
2993 struct llog_gen {
2994 __u64 mnt_cnt;
2995 __u64 conn_cnt;
2996 } __packed;
2997
2998 struct llog_gen_rec {
2999 struct llog_rec_hdr lgr_hdr;
3000 struct llog_gen lgr_gen;
3001 __u64 padding1;
3002 __u64 padding2;
3003 __u64 padding3;
3004 struct llog_rec_tail lgr_tail;
3005 };
3006
3007 /* On-disk header structure of each log object, stored in little endian order */
3008 #define LLOG_CHUNK_SIZE 8192
3009 #define LLOG_HEADER_SIZE (96)
3010 #define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3011
3012 #define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3013
3014 /* flags for the logs */
3015 enum llog_flag {
3016 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3017 LLOG_F_IS_CAT = 0x2,
3018 LLOG_F_IS_PLAIN = 0x4,
3019 };
3020
3021 struct llog_log_hdr {
3022 struct llog_rec_hdr llh_hdr;
3023 __s64 llh_timestamp;
3024 __u32 llh_count;
3025 __u32 llh_bitmap_offset;
3026 __u32 llh_size;
3027 __u32 llh_flags;
3028 __u32 llh_cat_idx;
3029 /* for a catalog the first plain slot is next to it */
3030 struct obd_uuid llh_tgtuuid;
3031 __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
3032 __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
3033 struct llog_rec_tail llh_tail;
3034 } __packed;
3035
3036 #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3037 llh->llh_bitmap_offset - \
3038 sizeof(llh->llh_tail)) * 8)
3039
3040 /** log cookies are used to reference a specific log file and a record
3041 * therein
3042 */
3043 struct llog_cookie {
3044 struct llog_logid lgc_lgl;
3045 __u32 lgc_subsys;
3046 __u32 lgc_index;
3047 __u32 lgc_padding;
3048 } __packed;
3049
3050 /** llog protocol */
3051 enum llogd_rpc_ops {
3052 LLOG_ORIGIN_HANDLE_CREATE = 501,
3053 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3054 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3055 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3056 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3057 LLOG_ORIGIN_CONNECT = 506,
3058 LLOG_CATINFO = 507, /* deprecated */
3059 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3060 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3061 LLOG_LAST_OPC,
3062 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3063 };
3064
3065 struct llogd_body {
3066 struct llog_logid lgd_logid;
3067 __u32 lgd_ctxt_idx;
3068 __u32 lgd_llh_flags;
3069 __u32 lgd_index;
3070 __u32 lgd_saved_index;
3071 __u32 lgd_len;
3072 __u64 lgd_cur_offset;
3073 } __packed;
3074
3075 struct llogd_conn_body {
3076 struct llog_gen lgdc_gen;
3077 struct llog_logid lgdc_logid;
3078 __u32 lgdc_ctxt_idx;
3079 } __packed;
3080
3081 /* Note: 64-bit types are 64-bit aligned in structure */
3082 struct obdo {
3083 __u64 o_valid; /* hot fields in this obdo */
3084 struct ost_id o_oi;
3085 __u64 o_parent_seq;
3086 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3087 __s64 o_mtime;
3088 __s64 o_atime;
3089 __s64 o_ctime;
3090 __u64 o_blocks; /* brw: cli sent cached bytes */
3091 __u64 o_grant;
3092
3093 /* 32-bit fields start here: keep an even number of them via padding */
3094 __u32 o_blksize; /* optimal IO blocksize */
3095 __u32 o_mode; /* brw: cli sent cache remain */
3096 __u32 o_uid;
3097 __u32 o_gid;
3098 __u32 o_flags;
3099 __u32 o_nlink; /* brw: checksum */
3100 __u32 o_parent_oid;
3101 __u32 o_misc; /* brw: o_dropped */
3102
3103 __u64 o_ioepoch; /* epoch in ost writes */
3104 __u32 o_stripe_idx; /* holds stripe idx */
3105 __u32 o_parent_ver;
3106 struct lustre_handle o_handle; /* brw: lock handle to prolong locks
3107 */
3108 struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS
3109 */
3110 __u32 o_uid_h;
3111 __u32 o_gid_h;
3112
3113 __u64 o_data_version; /* getattr: sum of iversion for
3114 * each stripe.
3115 * brw: grant space consumed on
3116 * the client for the write
3117 */
3118 __u64 o_padding_4;
3119 __u64 o_padding_5;
3120 __u64 o_padding_6;
3121 };
3122
3123 #define o_dirty o_blocks
3124 #define o_undirty o_mode
3125 #define o_dropped o_misc
3126 #define o_cksum o_nlink
3127 #define o_grant_used o_data_version
3128
3129 static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
3130 struct obdo *wobdo,
3131 const struct obdo *lobdo)
3132 {
3133 *wobdo = *lobdo;
3134 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3135 if (!ocd)
3136 return;
3137
3138 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3139 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3140 /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3141 * client communicates with a pre-2.4 server
3142 */
3143 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3144 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3145 }
3146 }
3147
3148 static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
3149 struct obdo *lobdo,
3150 const struct obdo *wobdo)
3151 {
3152 __u32 local_flags = 0;
3153
3154 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3155 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3156
3157 *lobdo = *wobdo;
3158 if (local_flags != 0) {
3159 lobdo->o_valid |= OBD_MD_FLFLAGS;
3160 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3161 lobdo->o_flags |= local_flags;
3162 }
3163 if (!ocd)
3164 return;
3165
3166 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3167 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3168 /* see above */
3169 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3170 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3171 lobdo->o_oi.oi_fid.f_ver = 0;
3172 }
3173 }
3174
3175 /* request structure for OST's */
3176 struct ost_body {
3177 struct obdo oa;
3178 };
3179
3180 /* Key for FIEMAP to be used in get_info calls */
3181 struct ll_fiemap_info_key {
3182 char name[8];
3183 struct obdo oa;
3184 struct ll_user_fiemap fiemap;
3185 };
3186
3187 void lustre_swab_ost_body(struct ost_body *b);
3188 void lustre_swab_ost_last_id(__u64 *id);
3189 void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3190
3191 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3192 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3193 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3194 int stripe_count);
3195 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3196
3197 /* llog_swab.c */
3198 void lustre_swab_llogd_body(struct llogd_body *d);
3199 void lustre_swab_llog_hdr(struct llog_log_hdr *h);
3200 void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
3201 void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3202
3203 struct lustre_cfg;
3204 void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3205
3206 /* Functions for dumping PTLRPC fields */
3207 void dump_rniobuf(struct niobuf_remote *rnb);
3208 void dump_ioo(struct obd_ioobj *nb);
3209 void dump_ost_body(struct ost_body *ob);
3210 void dump_rcs(__u32 *rc);
3211
3212 /* security opcodes */
3213 enum sec_cmd {
3214 SEC_CTX_INIT = 801,
3215 SEC_CTX_INIT_CONT = 802,
3216 SEC_CTX_FINI = 803,
3217 SEC_LAST_OPC,
3218 SEC_FIRST_OPC = SEC_CTX_INIT
3219 };
3220
3221 /*
3222 * capa related definitions
3223 */
3224 #define CAPA_HMAC_MAX_LEN 64
3225 #define CAPA_HMAC_KEY_MAX_LEN 56
3226
3227 /* NB: take care when changing the sequence of elements in this struct,
3228 * because the offset info is used in find_capa()
3229 */
3230 struct lustre_capa {
3231 struct lu_fid lc_fid; /** fid */
3232 __u64 lc_opc; /** operations allowed */
3233 __u64 lc_uid; /** file owner */
3234 __u64 lc_gid; /** file group */
3235 __u32 lc_flags; /** HMAC algorithm & flags */
3236 __u32 lc_keyid; /** key# used for the capability */
3237 __u32 lc_timeout; /** capa timeout value (sec) */
3238 /* FIXME: y2038 time_t overflow: */
3239 __u32 lc_expiry; /** expiry time (sec) */
3240 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3241 } __packed;
3242
3243 void lustre_swab_lustre_capa(struct lustre_capa *c);
3244
3245 /** lustre_capa::lc_opc */
3246 enum {
3247 CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
3248 CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
3249 CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
3250 CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
3251 CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
3252 CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
3253 CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
3254 CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
3255 CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
3256 CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
3257 CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
3258 };
3259
3260 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3261 #define CAPA_OPC_MDS_ONLY \
3262 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3263 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3264 #define CAPA_OPC_OSS_ONLY \
3265 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3266 CAPA_OPC_OSS_DESTROY)
3267 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3268 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
3269
3270 struct lustre_capa_key {
3271 __u64 lk_seq; /**< mds# */
3272 __u32 lk_keyid; /**< key# */
3273 __u32 lk_padding;
3274 __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
3275 } __packed;
3276
3277 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3278 #define LINK_EA_MAGIC 0x11EAF1DFUL
3279 struct link_ea_header {
3280 __u32 leh_magic;
3281 __u32 leh_reccount;
3282 __u64 leh_len; /* total size */
3283 /* future use */
3284 __u32 padding1;
3285 __u32 padding2;
3286 };
3287
3288 /** Hardlink data is name and parent fid.
3289 * Stored in this crazy struct for maximum packing and endian-neutrality
3290 */
3291 struct link_ea_entry {
3292 /** __u16 stored big-endian, unaligned */
3293 unsigned char lee_reclen[2];
3294 unsigned char lee_parent_fid[sizeof(struct lu_fid)];
3295 char lee_name[0];
3296 } __packed;
3297
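/*
 * Editorial sketch (not part of the original header): lee_reclen is a
 * 16-bit big-endian length stored as two bytes so entries need no
 * alignment; decoding it is two shifts.
 */
static inline int link_ea_entry_reclen_example(const struct link_ea_entry *lee)
{
	return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}
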
3298 /** fid2path request/reply structure */
3299 struct getinfo_fid2path {
3300 struct lu_fid gf_fid;
3301 __u64 gf_recno;
3302 __u32 gf_linkno;
3303 __u32 gf_pathlen;
3304 char gf_path[0];
3305 } __packed;
3306
3307 void lustre_swab_fid2path(struct getinfo_fid2path *gf);
3308
3309 enum {
3310 LAYOUT_INTENT_ACCESS = 0,
3311 LAYOUT_INTENT_READ = 1,
3312 LAYOUT_INTENT_WRITE = 2,
3313 LAYOUT_INTENT_GLIMPSE = 3,
3314 LAYOUT_INTENT_TRUNC = 4,
3315 LAYOUT_INTENT_RELEASE = 5,
3316 LAYOUT_INTENT_RESTORE = 6
3317 };
3318
3319 /* enqueue layout lock with intent */
3320 struct layout_intent {
3321 __u32 li_opc; /* intent operation for enqueue, read, write etc */
3322 __u32 li_flags;
3323 __u64 li_start;
3324 __u64 li_end;
3325 };
3326
3327 void lustre_swab_layout_intent(struct layout_intent *li);
3328
3329 /**
3330 * On the wire version of hsm_progress structure.
3331 *
3332 * Contains the userspace hsm_progress and some internal fields.
3333 */
3334 struct hsm_progress_kernel {
3335 /* Field taken from struct hsm_progress */
3336 struct lu_fid hpk_fid;
3337 __u64 hpk_cookie;
3338 struct hsm_extent hpk_extent;
3339 __u16 hpk_flags;
3340 __u16 hpk_errval; /* positive val */
3341 __u32 hpk_padding1;
3342 /* Additional fields */
3343 __u64 hpk_data_version;
3344 __u64 hpk_padding2;
3345 } __packed;
3346
3347 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3348 void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3349 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3351 void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3352 void lustre_swab_hsm_request(struct hsm_request *hr);
3353
3354 /** layout swap request structure
3355 * fid1 and fid2 are in mdt_body
3356 */
3357 struct mdc_swap_layouts {
3358 __u64 msl_flags;
3359 } __packed;
3360
3361 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
3362
3363 struct close_data {
3364 struct lustre_handle cd_handle;
3365 struct lu_fid cd_fid;
3366 __u64 cd_data_version;
3367 __u64 cd_reserved[8];
3368 };
3369
3370 void lustre_swab_close_data(struct close_data *data);
3371
3372 #endif
3373 /** @} lustreidl */