/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ceph_fs.h - Ceph constants and data types to share between kernel and
 * user space.
 *
 * Most types in this file are defined as little-endian, and are
 * primarily intended to describe data structures that pass over the
 * wire or that are stored on disk.
 *
 * LGPL2
 */

#ifndef CEPH_FS_H
#define CEPH_FS_H

#include <linux/ceph/msgr.h>
#include <linux/ceph/rados.h>

/*
 * subprotocol versions.  when specific message types or high-level
 * protocols change, bump the affected components.  we rev internal
 * cluster protocols separately from the public, client-facing
 * protocol.
 */
#define CEPH_OSDC_PROTOCOL   24 /* server/client */
#define CEPH_MDSC_PROTOCOL   32 /* server/client */
#define CEPH_MONC_PROTOCOL   15 /* server/client */


#define CEPH_INO_ROOT   1
#define CEPH_INO_CEPH   2 /* hidden .ceph dir */
#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */

/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON   31

/*
 * legacy ceph_file_layout
 */
struct ceph_file_layout_legacy {
        /* file -> object mapping */
        __le32 fl_stripe_unit;     /* stripe unit, in bytes.  must be multiple
                                      of page size. */
        __le32 fl_stripe_count;    /* over this many objects */
        __le32 fl_object_size;     /* until objects are this big, then move to
                                      new objects */
        __le32 fl_cas_hash;        /* UNUSED. 0 = none; 1 = sha256 */

        /* pg -> disk layout */
        __le32 fl_object_stripe_unit;  /* UNUSED. for per-object parity, if any */

        /* object -> pg layout */
        __le32 fl_unused;       /* unused; used to be preferred primary for pg (-1 for none) */
        __le32 fl_pg_pool;      /* namespace, crush ruleset, rep level */
} __attribute__ ((packed));

struct ceph_string;
/*
 * ceph_file_layout - describe data layout for a file/inode
 */
struct ceph_file_layout {
        /* file -> object mapping */
        u32 stripe_unit;   /* stripe unit, in bytes */
        u32 stripe_count;  /* over this many objects */
        u32 object_size;   /* until objects are this big */
        s64 pool_id;       /* rados pool id */
        struct ceph_string __rcu *pool_ns; /* rados pool namespace */
};

extern int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
extern void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
                                         struct ceph_file_layout_legacy *legacy);
extern void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
                                       struct ceph_file_layout_legacy *legacy);

#define CEPH_MIN_STRIPE_UNIT 65536
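
/*
 * Illustrative sketch (not part of the upstream header): one common reading
 * of the striping fields above.  A file is cut into stripe_unit-sized blocks,
 * blocks are distributed round-robin over stripe_count objects, and each
 * object holds at most object_size bytes before the mapping moves on to the
 * next object set.  Plain 64-bit division is used here for clarity; the real
 * client computes this mapping elsewhere and may differ in detail.
 */
static inline void ceph_example_layout_map(const struct ceph_file_layout *l,
                                           u64 off, u64 *objnum, u64 *objoff)
{
        u32 su = l->stripe_unit;                /* bytes per stripe unit */
        u32 sc = l->stripe_count;               /* objects per object set */
        u32 su_per_obj = l->object_size / su;   /* stripe units per object */
        u64 blockno = off / su;                 /* which stripe unit overall */
        u64 stripeno = blockno / sc;            /* which stripe */
        u64 stripepos = blockno % sc;           /* object within the set */
        u64 objsetno = stripeno / su_per_obj;   /* which object set */
        u64 objsetpos = stripeno % su_per_obj;  /* stripe unit within object */

        *objnum = objsetno * sc + stripepos;
        *objoff = objsetpos * su + (off % su);
}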

struct ceph_dir_layout {
        __u8   dl_dir_hash;   /* see ceph_hash.h for ids */
        __u8   dl_unused1;
        __u16  dl_unused2;
        __u32  dl_unused3;
} __attribute__ ((packed));

/* crypto algorithms */
#define CEPH_CRYPTO_NONE 0x0
#define CEPH_CRYPTO_AES  0x1

#define CEPH_AES_IV "cephsageyudagreg"

/* security/authentication protocols */
#define CEPH_AUTH_UNKNOWN     0x0
#define CEPH_AUTH_NONE        0x1
#define CEPH_AUTH_CEPHX       0x2

#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)


/*********************************************
 * message layer
 */

/*
 * message types
 */

/* misc */
#define CEPH_MSG_SHUTDOWN               1
#define CEPH_MSG_PING                   2

/* client <-> monitor */
#define CEPH_MSG_MON_MAP                4
#define CEPH_MSG_MON_GET_MAP            5
#define CEPH_MSG_STATFS                 13
#define CEPH_MSG_STATFS_REPLY           14
#define CEPH_MSG_MON_SUBSCRIBE         15
#define CEPH_MSG_MON_SUBSCRIBE_ACK     16
#define CEPH_MSG_AUTH                   17
#define CEPH_MSG_AUTH_REPLY             18
#define CEPH_MSG_MON_GET_VERSION        19
#define CEPH_MSG_MON_GET_VERSION_REPLY  20

/* client <-> mds */
#define CEPH_MSG_MDS_MAP                21
#define CEPH_MSG_FS_MAP_USER            103

#define CEPH_MSG_CLIENT_SESSION         22
#define CEPH_MSG_CLIENT_RECONNECT       23

#define CEPH_MSG_CLIENT_REQUEST         24
#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
#define CEPH_MSG_CLIENT_REPLY           26
#define CEPH_MSG_CLIENT_CAPS            0x310
#define CEPH_MSG_CLIENT_LEASE           0x311
#define CEPH_MSG_CLIENT_SNAP            0x312
#define CEPH_MSG_CLIENT_CAPRELEASE      0x313
#define CEPH_MSG_CLIENT_QUOTA           0x314

/* pool ops */
#define CEPH_MSG_POOLOP_REPLY           48
#define CEPH_MSG_POOLOP                 49

/* mon commands */
#define CEPH_MSG_MON_COMMAND            50
#define CEPH_MSG_MON_COMMAND_ACK        51

/* osd */
#define CEPH_MSG_OSD_MAP                41
#define CEPH_MSG_OSD_OP                 42
#define CEPH_MSG_OSD_OPREPLY            43
#define CEPH_MSG_WATCH_NOTIFY           44
#define CEPH_MSG_OSD_BACKOFF            61


/* watch-notify operations */
enum {
        CEPH_WATCH_EVENT_NOTIFY           = 1, /* notifying watcher */
        CEPH_WATCH_EVENT_NOTIFY_COMPLETE  = 2, /* notifier notified when done */
        CEPH_WATCH_EVENT_DISCONNECT       = 3, /* we were disconnected */
};


struct ceph_mon_request_header {
        __le64 have_version;
        __le16 session_mon;
        __le64 session_mon_tid;
} __attribute__ ((packed));

struct ceph_mon_statfs {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
        __u8 contains_data_pool;
        __le64 data_pool;
} __attribute__ ((packed));

struct ceph_statfs {
        __le64 kb, kb_used, kb_avail;
        __le64 num_objects;
} __attribute__ ((packed));

struct ceph_mon_statfs_reply {
        struct ceph_fsid fsid;
        __le64 version;
        struct ceph_statfs st;
} __attribute__ ((packed));

struct ceph_mon_command {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
        __le32 num_strs;         /* always 1 */
        __le32 str_len;
        char str[];
} __attribute__ ((packed));
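
/*
 * Illustrative sketch (not part of the upstream header): a mon command
 * carries exactly one length-prefixed string, so a caller would allocate
 * sizeof(struct ceph_mon_command) + cmd_len bytes and fill it roughly like
 * this (cpu_to_le32/memset/memcpy from the usual kernel headers are assumed).
 */
static inline void ceph_example_fill_mon_command(struct ceph_mon_command *cmd,
                                                 const struct ceph_fsid *fsid,
                                                 const char *cmd_str, u32 cmd_len)
{
        memset(&cmd->monhdr, 0, sizeof(cmd->monhdr));
        cmd->fsid = *fsid;
        cmd->num_strs = cpu_to_le32(1);      /* always 1, per the field comment */
        cmd->str_len = cpu_to_le32(cmd_len);
        memcpy(cmd->str, cmd_str, cmd_len);  /* caller allocated room for this */
}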

struct ceph_osd_getmap {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
        __le32 start;
} __attribute__ ((packed));

struct ceph_mds_getmap {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
} __attribute__ ((packed));

struct ceph_client_mount {
        struct ceph_mon_request_header monhdr;
} __attribute__ ((packed));

#define CEPH_SUBSCRIBE_ONETIME     1  /* i want only 1 update after have */

struct ceph_mon_subscribe_item {
        __le64 start;
        __u8 flags;
} __attribute__ ((packed));

struct ceph_mon_subscribe_ack {
        __le32 duration;         /* seconds */
        struct ceph_fsid fsid;
} __attribute__ ((packed));

#define CEPH_FS_CLUSTER_ID_NONE  -1

/*
 * mdsmap flags
 */
#define CEPH_MDSMAP_DOWN    (1<<0)  /* cluster deliberately down */

/*
 * mds states
 *   > 0 -> in
 *  <= 0 -> out
 */
#define CEPH_MDS_STATE_DNE          0  /* down, does not exist. */
#define CEPH_MDS_STATE_STOPPED     -1  /* down, once existed, but no subtrees.
                                          empty log. */
#define CEPH_MDS_STATE_BOOT        -4  /* up, boot announcement. */
#define CEPH_MDS_STATE_STANDBY     -5  /* up, idle.  waiting for assignment. */
#define CEPH_MDS_STATE_CREATING    -6  /* up, creating MDS instance. */
#define CEPH_MDS_STATE_STARTING    -7  /* up, starting previously stopped mds */
#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
#define CEPH_MDS_STATE_REPLAYONCE   -9 /* up, replaying an active node's journal */

#define CEPH_MDS_STATE_REPLAY       8  /* up, replaying journal. */
#define CEPH_MDS_STATE_RESOLVE      9  /* up, disambiguating distributed
                                          operations (import, rename, etc.) */
#define CEPH_MDS_STATE_RECONNECT    10 /* up, reconnect to clients */
#define CEPH_MDS_STATE_REJOIN       11 /* up, rejoining distributed cache */
#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */
#define CEPH_MDS_STATE_ACTIVE       13 /* up, active */
#define CEPH_MDS_STATE_STOPPING     14 /* up, but exporting metadata */

extern const char *ceph_mds_state_name(int s);


/*
 * metadata lock types.
 *  - these are bitmasks.. we can compose them
 *  - they also define the lock ordering by the MDS
 *  - a few of these are internal to the mds
 */
#define CEPH_LOCK_DVERSION    1
#define CEPH_LOCK_DN          2
#define CEPH_LOCK_ISNAP       16
#define CEPH_LOCK_IVERSION    32    /* mds internal */
#define CEPH_LOCK_IFILE       64
#define CEPH_LOCK_IAUTH       128
#define CEPH_LOCK_ILINK       256
#define CEPH_LOCK_IDFT        512   /* dir frag tree */
#define CEPH_LOCK_INEST       1024  /* mds internal */
#define CEPH_LOCK_IXATTR      2048
#define CEPH_LOCK_IFLOCK      4096  /* advisory file locks */
#define CEPH_LOCK_INO         8192  /* immutable inode bits; not a lock */
#define CEPH_LOCK_IPOLICY     16384 /* policy lock on dirs. MDS internal */

/* client_session ops */
enum {
        CEPH_SESSION_REQUEST_OPEN,
        CEPH_SESSION_OPEN,
        CEPH_SESSION_REQUEST_CLOSE,
        CEPH_SESSION_CLOSE,
        CEPH_SESSION_REQUEST_RENEWCAPS,
        CEPH_SESSION_RENEWCAPS,
        CEPH_SESSION_STALE,
        CEPH_SESSION_RECALL_STATE,
        CEPH_SESSION_FLUSHMSG,
        CEPH_SESSION_FLUSHMSG_ACK,
        CEPH_SESSION_FORCE_RO,
        CEPH_SESSION_REJECT,
};

extern const char *ceph_session_op_name(int op);

struct ceph_mds_session_head {
        __le32 op;
        __le64 seq;
        struct ceph_timespec stamp;
        __le32 max_caps, max_leases;
} __attribute__ ((packed));

/* client_request */
/*
 * metadata ops.
 *  & 0x001000 -> write op
 *  & 0x010000 -> follow symlink (e.g. stat(), not lstat()).
 *  & 0x100000 -> use weird ino/path trace
 * (an illustrative mask check follows the op name helper below)
 */
#define CEPH_MDS_OP_WRITE        0x001000
enum {
        CEPH_MDS_OP_LOOKUP     = 0x00100,
        CEPH_MDS_OP_GETATTR    = 0x00101,
        CEPH_MDS_OP_LOOKUPHASH = 0x00102,
        CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
        CEPH_MDS_OP_LOOKUPINO  = 0x00104,
        CEPH_MDS_OP_LOOKUPNAME = 0x00105,

        CEPH_MDS_OP_SETXATTR   = 0x01105,
        CEPH_MDS_OP_RMXATTR    = 0x01106,
        CEPH_MDS_OP_SETLAYOUT  = 0x01107,
        CEPH_MDS_OP_SETATTR    = 0x01108,
        CEPH_MDS_OP_SETFILELOCK= 0x01109,
        CEPH_MDS_OP_GETFILELOCK= 0x00110,
        CEPH_MDS_OP_SETDIRLAYOUT=0x0110a,

        CEPH_MDS_OP_MKNOD      = 0x01201,
        CEPH_MDS_OP_LINK       = 0x01202,
        CEPH_MDS_OP_UNLINK     = 0x01203,
        CEPH_MDS_OP_RENAME     = 0x01204,
        CEPH_MDS_OP_MKDIR      = 0x01220,
        CEPH_MDS_OP_RMDIR      = 0x01221,
        CEPH_MDS_OP_SYMLINK    = 0x01222,

        CEPH_MDS_OP_CREATE     = 0x01301,
        CEPH_MDS_OP_OPEN       = 0x00302,
        CEPH_MDS_OP_READDIR    = 0x00305,

        CEPH_MDS_OP_LOOKUPSNAP = 0x00400,
        CEPH_MDS_OP_MKSNAP     = 0x01400,
        CEPH_MDS_OP_RMSNAP     = 0x01401,
        CEPH_MDS_OP_LSSNAP     = 0x00402,
        CEPH_MDS_OP_RENAMESNAP = 0x01403,
};

extern const char *ceph_mds_op_name(int op);
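
/*
 * Illustrative sketch (not part of the upstream header): since the op codes
 * form a bitfield as described above, the "write op" property can be tested
 * with the CEPH_MDS_OP_WRITE mask, e.g. CEPH_MDS_OP_MKDIR (0x01220) is a
 * write op while CEPH_MDS_OP_READDIR (0x00305) is not.
 */
static inline bool ceph_example_mds_op_is_write(int op)
{
        return (op & CEPH_MDS_OP_WRITE) != 0;
}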


#define CEPH_SETATTR_MODE   1
#define CEPH_SETATTR_UID    2
#define CEPH_SETATTR_GID    4
#define CEPH_SETATTR_MTIME  8
#define CEPH_SETATTR_ATIME 16
#define CEPH_SETATTR_SIZE  32
#define CEPH_SETATTR_CTIME 64

/*
 * Ceph setxattr request flags.
 */
#define CEPH_XATTR_CREATE  (1 << 0)
#define CEPH_XATTR_REPLACE (1 << 1)
#define CEPH_XATTR_REMOVE  (1 << 31)

/*
 * readdir request flags
 */
#define CEPH_READDIR_REPLY_BITFLAGS     (1<<0)

/*
 * readdir reply flags.
 */
#define CEPH_READDIR_FRAG_END           (1<<0)
#define CEPH_READDIR_FRAG_COMPLETE      (1<<8)
#define CEPH_READDIR_HASH_ORDER         (1<<9)
#define CEPH_READDIR_OFFSET_HASH        (1<<10)

/*
 * open request flags
 */
#define CEPH_O_RDONLY          00000000
#define CEPH_O_WRONLY          00000001
#define CEPH_O_RDWR            00000002
#define CEPH_O_CREAT           00000100
#define CEPH_O_EXCL            00000200
#define CEPH_O_TRUNC           00001000
#define CEPH_O_DIRECTORY       00200000
#define CEPH_O_NOFOLLOW        00400000

union ceph_mds_request_args {
        struct {
                __le32 mask;                 /* CEPH_CAP_* */
        } __attribute__ ((packed)) getattr;
        struct {
                __le32 mode;
                __le32 uid;
                __le32 gid;
                struct ceph_timespec mtime;
                struct ceph_timespec atime;
                __le64 size, old_size;       /* old_size needed by truncate */
                __le32 mask;                 /* CEPH_SETATTR_* */
        } __attribute__ ((packed)) setattr;
        struct {
                __le32 frag;                 /* which dir fragment */
                __le32 max_entries;          /* how many dentries to grab */
                __le32 max_bytes;
                __le16 flags;
                __le32 offset_hash;
        } __attribute__ ((packed)) readdir;
        struct {
                __le32 mode;
                __le32 rdev;
        } __attribute__ ((packed)) mknod;
        struct {
                __le32 mode;
        } __attribute__ ((packed)) mkdir;
        struct {
                __le32 flags;
                __le32 mode;
                __le32 stripe_unit;          /* layout for newly created file */
                __le32 stripe_count;         /* ... */
                __le32 object_size;
                __le32 file_replication;
                __le32 mask;                 /* CEPH_CAP_* */
                __le32 old_size;
        } __attribute__ ((packed)) open;
        struct {
                __le32 flags;
        } __attribute__ ((packed)) setxattr;
        struct {
                struct ceph_file_layout_legacy layout;
        } __attribute__ ((packed)) setlayout;
        struct {
                __u8 rule;           /* currently fcntl or flock */
                __u8 type;           /* shared, exclusive, remove */
                __le64 owner;        /* owner of the lock */
                __le64 pid;          /* process id requesting the lock */
                __le64 start;        /* initial location to lock */
                __le64 length;       /* num bytes to lock from start */
                __u8 wait;           /* will caller wait for lock to become available? */
        } __attribute__ ((packed)) filelock_change;
} __attribute__ ((packed));

#define CEPH_MDS_FLAG_REPLAY        1  /* this is a replayed op */
#define CEPH_MDS_FLAG_WANT_DENTRY   2  /* want dentry in reply */

struct ceph_mds_request_head {
        __le64 oldest_client_tid;
        __le32 mdsmap_epoch;           /* on client */
        __le32 flags;                  /* CEPH_MDS_FLAG_* */
        __u8 num_retry, num_fwd;       /* count retry, fwd attempts */
        __le16 num_releases;           /* # include cap/lease release records */
        __le32 op;                     /* mds op code */
        __le32 caller_uid, caller_gid;
        __le64 ino;                    /* use this ino for openc, mkdir, mknod,
                                          etc. (if replaying) */
        union ceph_mds_request_args args;
} __attribute__ ((packed));
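
/*
 * Illustrative sketch (not part of the upstream header): filling in the fixed
 * head of a GETATTR request using the fields defined above.  A real client
 * also encodes the path strings, any cap/lease releases and the surrounding
 * message envelope; cpu_to_le32/memset from the usual kernel headers are
 * assumed.
 */
static inline void ceph_example_fill_getattr_head(struct ceph_mds_request_head *head,
                                                  u32 mdsmap_epoch, u32 cap_mask)
{
        memset(head, 0, sizeof(*head));
        head->mdsmap_epoch = cpu_to_le32(mdsmap_epoch);
        head->op = cpu_to_le32(CEPH_MDS_OP_GETATTR);
        head->args.getattr.mask = cpu_to_le32(cap_mask);  /* CEPH_CAP_* bits */
}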

/* cap/lease release record */
struct ceph_mds_request_release {
        __le64 ino, cap_id;            /* ino and unique cap id */
        __le32 caps, wanted;           /* new issued, wanted */
        __le32 seq, issue_seq, mseq;
        __le32 dname_seq;              /* if releasing a dentry lease, a */
        __le32 dname_len;              /* string follows. */
} __attribute__ ((packed));

/* client reply */
struct ceph_mds_reply_head {
        __le32 op;
        __le32 result;
        __le32 mdsmap_epoch;
        __u8 safe;                     /* true if committed to disk */
        __u8 is_dentry, is_target;     /* true if dentry, target inode records
                                          are included with reply */
} __attribute__ ((packed));

/* one for each node split */
struct ceph_frag_tree_split {
        __le32 frag;                   /* this frag splits... */
        __le32 by;                     /* ...by this many bits */
} __attribute__ ((packed));

struct ceph_frag_tree_head {
        __le32 nsplits;                /* num ceph_frag_tree_split records */
        struct ceph_frag_tree_split splits[];
} __attribute__ ((packed));
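
/*
 * Illustrative sketch (not part of the upstream header): ceph_frag_tree_head
 * is a variable-length record, so the space it occupies in a reply blob
 * follows directly from nsplits (passed here already converted to cpu order).
 */
static inline size_t ceph_example_frag_tree_bytes(u32 nsplits)
{
        return sizeof(struct ceph_frag_tree_head) +
               nsplits * sizeof(struct ceph_frag_tree_split);
}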

/* capability issue, for bundling with mds reply */
struct ceph_mds_reply_cap {
        __le32 caps, wanted;           /* caps issued, wanted */
        __le64 cap_id;
        __le32 seq, mseq;
        __le64 realm;                  /* snap realm */
        __u8 flags;                    /* CEPH_CAP_FLAG_* */
} __attribute__ ((packed));

#define CEPH_CAP_FLAG_AUTH      (1 << 0)  /* cap is issued by auth mds */
#define CEPH_CAP_FLAG_RELEASE   (1 << 1)  /* release the cap */

/* inode record, for bundling with mds reply */
struct ceph_mds_reply_inode {
        __le64 ino;
        __le64 snapid;
        __le32 rdev;
        __le64 version;                /* inode version */
        __le64 xattr_version;          /* version for xattr blob */
        struct ceph_mds_reply_cap cap; /* caps issued for this inode */
        struct ceph_file_layout_legacy layout;
        struct ceph_timespec ctime, mtime, atime;
        __le32 time_warp_seq;
        __le64 size, max_size, truncate_size;
        __le32 truncate_seq;
        __le32 mode, uid, gid;
        __le32 nlink;
        __le64 files, subdirs, rbytes, rfiles, rsubdirs;  /* dir stats */
        struct ceph_timespec rctime;
        struct ceph_frag_tree_head fragtree;  /* (must be at end of struct) */
} __attribute__ ((packed));
/* followed by frag array, symlink string, dir layout, xattr blob */

/* reply_lease follows dname, and reply_inode */
struct ceph_mds_reply_lease {
        __le16 mask;            /* lease type(s) */
        __le32 duration_ms;     /* lease duration */
        __le32 seq;
} __attribute__ ((packed));

struct ceph_mds_reply_dirfrag {
        __le32 frag;            /* fragment */
        __le32 auth;            /* auth mds, if this is a delegation point */
        __le32 ndist;           /* number of mds' this is replicated on */
        __le32 dist[];
} __attribute__ ((packed));

#define CEPH_LOCK_FCNTL         1
#define CEPH_LOCK_FLOCK         2
#define CEPH_LOCK_FCNTL_INTR    3
#define CEPH_LOCK_FLOCK_INTR    4


#define CEPH_LOCK_SHARED   1
#define CEPH_LOCK_EXCL     2
#define CEPH_LOCK_UNLOCK   4

struct ceph_filelock {
        __le64 start;        /* file offset to start lock at */
        __le64 length;       /* num bytes to lock; 0 for all following start */
        __le64 client;       /* which client holds the lock */
        __le64 owner;        /* owner of the lock */
        __le64 pid;          /* process id holding the lock on the client */
        __u8 type;           /* shared lock, exclusive lock, or unlock */
} __attribute__ ((packed));


/* file access modes */
#define CEPH_FILE_MODE_PIN        0
#define CEPH_FILE_MODE_RD         1
#define CEPH_FILE_MODE_WR         2
#define CEPH_FILE_MODE_RDWR       3  /* RD | WR */
#define CEPH_FILE_MODE_LAZY       4  /* lazy io */
#define CEPH_FILE_MODE_BITS       4

int ceph_flags_to_mode(int flags);

#define CEPH_INLINE_NONE        ((__u64)-1)

/* capability bits */
#define CEPH_CAP_PIN         1  /* no specific capabilities beyond the pin */

/* generic cap bits */
#define CEPH_CAP_GSHARED     1  /* client can read */
#define CEPH_CAP_GEXCL       2  /* client can read and update */
#define CEPH_CAP_GCACHE      4  /* (file) client can cache reads */
#define CEPH_CAP_GRD         8  /* (file) client can read */
#define CEPH_CAP_GWR        16  /* (file) client can write */
#define CEPH_CAP_GBUFFER    32  /* (file) client can buffer writes */
#define CEPH_CAP_GWREXTEND  64  /* (file) client can extend EOF */
#define CEPH_CAP_GLAZYIO   128  /* (file) client can perform lazy io */

#define CEPH_CAP_SIMPLE_BITS  2
#define CEPH_CAP_FILE_BITS    8

/* per-lock shift */
#define CEPH_CAP_SAUTH      2
#define CEPH_CAP_SLINK      4
#define CEPH_CAP_SXATTR     6
#define CEPH_CAP_SFILE      8
#define CEPH_CAP_SFLOCK    20

#define CEPH_CAP_BITS      22

/* composed values */
#define CEPH_CAP_AUTH_SHARED   (CEPH_CAP_GSHARED   << CEPH_CAP_SAUTH)
#define CEPH_CAP_AUTH_EXCL     (CEPH_CAP_GEXCL     << CEPH_CAP_SAUTH)
#define CEPH_CAP_LINK_SHARED   (CEPH_CAP_GSHARED   << CEPH_CAP_SLINK)
#define CEPH_CAP_LINK_EXCL     (CEPH_CAP_GEXCL     << CEPH_CAP_SLINK)
#define CEPH_CAP_XATTR_SHARED  (CEPH_CAP_GSHARED   << CEPH_CAP_SXATTR)
#define CEPH_CAP_XATTR_EXCL    (CEPH_CAP_GEXCL     << CEPH_CAP_SXATTR)
#define CEPH_CAP_FILE(x)       (x << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_SHARED   (CEPH_CAP_GSHARED   << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_EXCL     (CEPH_CAP_GEXCL     << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_CACHE    (CEPH_CAP_GCACHE    << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_RD       (CEPH_CAP_GRD       << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_WR       (CEPH_CAP_GWR       << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_BUFFER   (CEPH_CAP_GBUFFER   << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_LAZYIO   (CEPH_CAP_GLAZYIO   << CEPH_CAP_SFILE)
#define CEPH_CAP_FLOCK_SHARED  (CEPH_CAP_GSHARED   << CEPH_CAP_SFLOCK)
#define CEPH_CAP_FLOCK_EXCL    (CEPH_CAP_GEXCL     << CEPH_CAP_SFLOCK)

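/*
 * Illustrative sketch (not part of the upstream header): the composed values
 * are just the generic CEPH_CAP_G* bits shifted into a per-lock slot, e.g.
 * CEPH_CAP_FILE_BUFFER == CEPH_CAP_GBUFFER << CEPH_CAP_SFILE.  Individual
 * rights can therefore be tested in an issued cap word with simple masks:
 */
static inline bool ceph_example_can_buffer_writes(int issued)
{
        int needed = CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER;

        return (issued & needed) == needed;
}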

/* cap masks (for getattr) */
#define CEPH_STAT_CAP_INODE    CEPH_CAP_PIN
#define CEPH_STAT_CAP_TYPE     CEPH_CAP_PIN  /* mode >> 12 */
#define CEPH_STAT_CAP_SYMLINK  CEPH_CAP_PIN
#define CEPH_STAT_CAP_UID      CEPH_CAP_AUTH_SHARED
#define CEPH_STAT_CAP_GID      CEPH_CAP_AUTH_SHARED
#define CEPH_STAT_CAP_MODE     CEPH_CAP_AUTH_SHARED
#define CEPH_STAT_CAP_NLINK    CEPH_CAP_LINK_SHARED
#define CEPH_STAT_CAP_LAYOUT   CEPH_CAP_FILE_SHARED
#define CEPH_STAT_CAP_MTIME    CEPH_CAP_FILE_SHARED
#define CEPH_STAT_CAP_SIZE     CEPH_CAP_FILE_SHARED
#define CEPH_STAT_CAP_ATIME    CEPH_CAP_FILE_SHARED  /* fixme */
#define CEPH_STAT_CAP_XATTR    CEPH_CAP_XATTR_SHARED
#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN |          \
                                 CEPH_CAP_AUTH_SHARED |  \
                                 CEPH_CAP_LINK_SHARED |  \
                                 CEPH_CAP_FILE_SHARED |  \
                                 CEPH_CAP_XATTR_SHARED)
#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
                                   CEPH_CAP_FILE_RD)

#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED |   \
                             CEPH_CAP_LINK_SHARED |   \
                             CEPH_CAP_XATTR_SHARED |  \
                             CEPH_CAP_FILE_SHARED)
#define CEPH_CAP_ANY_RD   (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD |  \
                           CEPH_CAP_FILE_CACHE)

#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL |   \
                           CEPH_CAP_LINK_EXCL |   \
                           CEPH_CAP_XATTR_EXCL |  \
                           CEPH_CAP_FILE_EXCL)
#define CEPH_CAP_ANY_FILE_RD (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE |  \
                              CEPH_CAP_FILE_SHARED)
#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |  \
                              CEPH_CAP_FILE_EXCL)
#define CEPH_CAP_ANY_WR   (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
#define CEPH_CAP_ANY      (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL |  \
                           CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO |  \
                           CEPH_CAP_PIN)

#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
                        CEPH_LOCK_IXATTR)

int ceph_caps_for_mode(int mode);

enum {
        CEPH_CAP_OP_GRANT,         /* mds->client grant */
        CEPH_CAP_OP_REVOKE,        /* mds->client revoke */
        CEPH_CAP_OP_TRUNC,         /* mds->client trunc notify */
        CEPH_CAP_OP_EXPORT,        /* mds has exported the cap */
        CEPH_CAP_OP_IMPORT,        /* mds has imported the cap */
        CEPH_CAP_OP_UPDATE,        /* client->mds update */
        CEPH_CAP_OP_DROP,          /* client->mds drop cap bits */
        CEPH_CAP_OP_FLUSH,         /* client->mds cap writeback */
        CEPH_CAP_OP_FLUSH_ACK,     /* mds->client flushed */
        CEPH_CAP_OP_FLUSHSNAP,     /* client->mds flush snapped metadata */
        CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */
        CEPH_CAP_OP_RELEASE,       /* client->mds release (clean) cap */
        CEPH_CAP_OP_RENEW,         /* client->mds renewal request */
};

extern const char *ceph_cap_op_name(int op);

/* flags field in client cap messages (version >= 10) */
#define CEPH_CLIENT_CAPS_SYNC                   (1<<0)
#define CEPH_CLIENT_CAPS_NO_CAPSNAP             (1<<1)
#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP        (1<<2)

/*
 * caps message, used for capability callbacks, acks, requests, etc.
 */
struct ceph_mds_caps {
        __le32 op;                  /* CEPH_CAP_OP_* */
        __le64 ino, realm;
        __le64 cap_id;
        __le32 seq, issue_seq;
        __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */
        __le32 migrate_seq;
        __le64 snap_follows;
        __le32 snap_trace_len;

        /* authlock */
        __le32 uid, gid, mode;

        /* linklock */
        __le32 nlink;

        /* xattrlock */
        __le32 xattr_len;
        __le64 xattr_version;

        /* filelock */
        __le64 size, max_size, truncate_size;
        __le32 truncate_seq;
        struct ceph_timespec mtime, atime, ctime;
        struct ceph_file_layout_legacy layout;
        __le32 time_warp_seq;
} __attribute__ ((packed));

struct ceph_mds_cap_peer {
        __le64 cap_id;
        __le32 seq;
        __le32 mseq;
        __le32 mds;
        __u8 flags;
} __attribute__ ((packed));

/* cap release msg head */
struct ceph_mds_cap_release {
        __le32 num;                /* number of cap_items that follow */
} __attribute__ ((packed));

struct ceph_mds_cap_item {
        __le64 ino;
        __le64 cap_id;
        __le32 migrate_seq, seq;
} __attribute__ ((packed));
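
/*
 * Illustrative sketch (not part of the upstream header): a CAPRELEASE message
 * body is a ceph_mds_cap_release head followed by 'num' ceph_mds_cap_item
 * records, so its size is easy to compute up front (num in cpu order here).
 */
static inline size_t ceph_example_cap_release_bytes(u32 num)
{
        return sizeof(struct ceph_mds_cap_release) +
               num * sizeof(struct ceph_mds_cap_item);
}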

#define CEPH_MDS_LEASE_REVOKE           1  /* mds -> client */
#define CEPH_MDS_LEASE_RELEASE          2  /* client -> mds */
#define CEPH_MDS_LEASE_RENEW            3  /* client <-> mds */
#define CEPH_MDS_LEASE_REVOKE_ACK       4  /* client -> mds */

extern const char *ceph_lease_op_name(int o);

/* lease msg header */
struct ceph_mds_lease {
        __u8 action;            /* CEPH_MDS_LEASE_* */
        __le16 mask;            /* which lease */
        __le64 ino;
        __le64 first, last;     /* snap range */
        __le32 seq;
        __le32 duration_ms;     /* duration of renewal */
} __attribute__ ((packed));
/* followed by a __le32+string for dname */

/* client reconnect */
struct ceph_mds_cap_reconnect {
        __le64 cap_id;
        __le32 wanted;
        __le32 issued;
        __le64 snaprealm;
        __le64 pathbase;        /* base ino for our path to this ino */
        __le32 flock_len;       /* size of flock state blob, if any */
} __attribute__ ((packed));
/* followed by flock blob */

struct ceph_mds_cap_reconnect_v1 {
        __le64 cap_id;
        __le32 wanted;
        __le32 issued;
        __le64 size;
        struct ceph_timespec mtime, atime;
        __le64 snaprealm;
        __le64 pathbase;        /* base ino for our path to this ino */
} __attribute__ ((packed));

struct ceph_mds_snaprealm_reconnect {
        __le64 ino;     /* snap realm base */
        __le64 seq;     /* snap seq for this snap realm */
        __le64 parent;  /* parent realm */
} __attribute__ ((packed));

/*
 * snaps
 */
enum {
        CEPH_SNAP_OP_UPDATE,  /* CREATE or DESTROY */
        CEPH_SNAP_OP_CREATE,
        CEPH_SNAP_OP_DESTROY,
        CEPH_SNAP_OP_SPLIT,
};

extern const char *ceph_snap_op_name(int o);

/* snap msg header */
struct ceph_mds_snap_head {
        __le32 op;                /* CEPH_SNAP_OP_* */
        __le64 split;             /* ino to split off, if any */
        __le32 num_split_inos;    /* # inos belonging to new child realm */
        __le32 num_split_realms;  /* # child realms under new child realm */
        __le32 trace_len;         /* size of snap trace blob */
} __attribute__ ((packed));
/* followed by split ino list, then split realms, then the trace blob */

/*
 * encode info about a snaprealm, as viewed by a client
 */
struct ceph_mds_snap_realm {
        __le64 ino;           /* ino */
        __le64 created;       /* snap: when created */
        __le64 parent;        /* ino: parent realm */
        __le64 parent_since;  /* snap: same parent since */
        __le64 seq;           /* snap: version */
        __le32 num_snaps;
        __le32 num_prior_parent_snaps;
} __attribute__ ((packed));
/* followed by my snap list, then prior parent snap list */

/*
 * quotas
 */
struct ceph_mds_quota {
        __le64 ino;             /* ino */
        struct ceph_timespec rctime;
        __le64 rbytes;          /* dir stats */
        __le64 rfiles;
        __le64 rsubdirs;
        __u8 struct_v;          /* compat */
        __u8 struct_compat;
        __le32 struct_len;
        __le64 max_bytes;       /* quota max. bytes */
        __le64 max_files;       /* quota max. files */
} __attribute__ ((packed));
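
/*
 * Illustrative sketch (not part of the upstream header): struct_v,
 * struct_compat and struct_len look like ceph's usual versioned-encoding
 * preamble for the fields that follow them, so a cautious decoder might
 * check that struct_len covers the max_bytes/max_files tail before using
 * those values (le32_to_cpu from the usual kernel headers is assumed).
 */
static inline bool ceph_example_quota_limits_present(const struct ceph_mds_quota *q)
{
        return le32_to_cpu(q->struct_len) >=
               sizeof(q->max_bytes) + sizeof(q->max_files);
}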

#endif