ceph: save cap_auths in MDS client when session is opened
author    Xiubo Li <xiubli@redhat.com>
          Mon, 25 Sep 2023 08:25:20 +0000 (16:25 +0800)
committer Ilya Dryomov <idryomov@gmail.com>
          Thu, 23 May 2024 08:35:46 +0000 (10:35 +0200)
Save the cap_auths, which the MDS has parsed and sends in the session-open
message, in the MDS client when the session is opened.

[ idryomov: use s64 and u32 instead of int64_t and uint32_t, switch to
            bool for root_squash, readable and writeable ]

Link: https://tracker.ceph.com/issues/61333
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
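
For context, the decode sequence added below implies the following per-entry
on-wire layout in the session-open message. This is a reconstruction from the
decoder in this patch, not an authoritative protocol specification:

    /*
     * Per-entry layout as walked by handle_session() below
     * (names follow the kernel-side structs):
     *
     *   MDSCapAuth:
     *     u8 struct_v, u8 struct_compat, u32 struct_len     (skipped)
     *     MDSCapMatch:
     *       u8 struct_v, u8 struct_compat, u32 struct_len   (skipped)
     *       s64 uid                       (MDS_AUTH_UID_ANY matches any uid)
     *       u32 num_gids, then num_gids x u32 gid
     *       u32 path_len, then path_len bytes (no NUL on the wire)
     *       u32 fs_name_len, then fs_name_len bytes (no NUL on the wire)
     *       u8 root_squash
     *     u8 readable
     *     u8 writeable
     */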
fs/ceph/mds_client.c
fs/ceph/mds_client.h

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 360b686c3c67cfd1f256c656642957f6ca278427..6a1e68549475ebcfc5c1fd4c4be2dd15bf446087 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4112,10 +4112,12 @@ static void handle_session(struct ceph_mds_session *session,
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
        struct ceph_mds_session_head *h;
-       u32 op;
+       struct ceph_mds_cap_auth *cap_auths = NULL;
+       u32 op, cap_auths_num = 0;
        u64 seq, features = 0;
        int wake = 0;
        bool blocklisted = false;
+       u32 i;
 
        /* decode */
        ceph_decode_need(&p, end, sizeof(*h), bad);
@@ -4160,7 +4163,101 @@ static void handle_session(struct ceph_mds_session *session,
                }
        }
 
+       if (msg_version >= 6) {
+               ceph_decode_32_safe(&p, end, cap_auths_num, bad);
+               doutc(cl, "cap_auths_num %u\n", cap_auths_num);
+
+               if (cap_auths_num && op != CEPH_SESSION_OPEN) {
+                       WARN_ON_ONCE(op != CEPH_SESSION_OPEN);
+                       goto skip_cap_auths;
+               }
+
+               cap_auths = kcalloc(cap_auths_num,
+                                   sizeof(struct ceph_mds_cap_auth),
+                                   GFP_KERNEL);
+               if (!cap_auths) {
+                       pr_err_client(cl, "No memory for cap_auths\n");
+                       return;
+               }
+
+               for (i = 0; i < cap_auths_num; i++) {
+                       u32 _len, j;
+
+                       /* struct_v, struct_compat, and struct_len in MDSCapAuth */
+                       ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
+
+                       /* struct_v, struct_compat, and struct_len in MDSCapMatch */
+                       ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
+                       ceph_decode_64_safe(&p, end, cap_auths[i].match.uid, bad);
+                       ceph_decode_32_safe(&p, end, _len, bad);
+                       if (_len) {
+                               cap_auths[i].match.gids = kcalloc(_len, sizeof(u32),
+                                                                 GFP_KERNEL);
+                               if (!cap_auths[i].match.gids) {
+                                       pr_err_client(cl, "No memory for gids\n");
+                                       goto fail;
+                               }
+
+                               cap_auths[i].match.num_gids = _len;
+                               for (j = 0; j < _len; j++)
+                                       ceph_decode_32_safe(&p, end,
+                                                           cap_auths[i].match.gids[j],
+                                                           bad);
+                       }
+
+                       ceph_decode_32_safe(&p, end, _len, bad);
+                       if (_len) {
+                               cap_auths[i].match.path = kcalloc(_len + 1, sizeof(char),
+                                                                 GFP_KERNEL);
+                               if (!cap_auths[i].match.path) {
+                                       pr_err_client(cl, "No memory for path\n");
+                                       goto fail;
+                               }
+                               ceph_decode_copy(&p, cap_auths[i].match.path, _len);
+
+                               /* Remove the trailing '/' */
+                               while (_len && cap_auths[i].match.path[_len - 1] == '/') {
+                                       cap_auths[i].match.path[_len - 1] = '\0';
+                                       _len -= 1;
+                               }
+                       }
+
+                       ceph_decode_32_safe(&p, end, _len, bad);
+                       if (_len) {
+                               cap_auths[i].match.fs_name = kcalloc(_len + 1, sizeof(char),
+                                                                    GFP_KERNEL);
+                               if (!cap_auths[i].match.fs_name) {
+                                       pr_err_client(cl, "No memory for fs_name\n");
+                                       goto fail;
+                               }
+                               ceph_decode_copy(&p, cap_auths[i].match.fs_name, _len);
+                       }
+
+                       ceph_decode_8_safe(&p, end, cap_auths[i].match.root_squash, bad);
+                       ceph_decode_8_safe(&p, end, cap_auths[i].readable, bad);
+                       ceph_decode_8_safe(&p, end, cap_auths[i].writeable, bad);
+                       doutc(cl, "uid %lld, num_gids %u, path %s, fs_name %s, root_squash %d, readable %d, writeable %d\n",
+                             cap_auths[i].match.uid, cap_auths[i].match.num_gids,
+                             cap_auths[i].match.path, cap_auths[i].match.fs_name,
+                             cap_auths[i].match.root_squash,
+                             cap_auths[i].readable, cap_auths[i].writeable);
+               }
+       }
+
+skip_cap_auths:
        mutex_lock(&mdsc->mutex);
+       if (op == CEPH_SESSION_OPEN) {
+               if (mdsc->s_cap_auths) {
+                       for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+                               kfree(mdsc->s_cap_auths[i].match.gids);
+                               kfree(mdsc->s_cap_auths[i].match.path);
+                               kfree(mdsc->s_cap_auths[i].match.fs_name);
+                       }
+                       kfree(mdsc->s_cap_auths);
+               }
+               mdsc->s_cap_auths_num = cap_auths_num;
+               mdsc->s_cap_auths = cap_auths;
+       }
        if (op == CEPH_SESSION_CLOSE) {
                ceph_get_mds_session(session);
                __unregister_session(mdsc, session);
@@ -4290,6 +4387,13 @@ bad:
        pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
                      (int)msg->front.iov_len);
        ceph_msg_dump(msg);
+fail:
+       for (i = 0; i < cap_auths_num; i++) {
+               kfree(cap_auths[i].match.gids);
+               kfree(cap_auths[i].match.path);
+               kfree(cap_auths[i].match.fs_name);
+       }
+       kfree(cap_auths);
        return;
 }
 
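The free loop above appears twice in this patch: once when a newly decoded
array replaces a previously saved one on CEPH_SESSION_OPEN, and once on the
fail path. A factored helper could remove the duplication; this is an
editorial sketch against the structs below, not code from this patch:

    /* Sketch: free a cap_auths array and its per-entry allocations. */
    static void free_cap_auths(struct ceph_mds_cap_auth *cap_auths, u32 num)
    {
            u32 i;

            for (i = 0; i < num; i++) {
                    kfree(cap_auths[i].match.gids);
                    kfree(cap_auths[i].match.path);
                    kfree(cap_auths[i].match.fs_name);
            }
            kfree(cap_auths);
    }

Both call sites would then reduce to a single free_cap_auths() call, followed
in the CEPH_SESSION_OPEN case by storing the new array.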
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index b88e804152241281e5d1cd5ca90057d9deff9240..cc106f1aa7d2a9cb021695fbff838d0908d8d9c9 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -71,6 +71,24 @@ enum ceph_feature_type {
 struct ceph_fs_client;
 struct ceph_cap;
 
+#define MDS_AUTH_UID_ANY -1
+
+struct ceph_mds_cap_match {
+       s64 uid;  /* default to MDS_AUTH_UID_ANY */
+       u32 num_gids;
+       u32 *gids;  /* use these GIDs */
+       char *path;  /* require path to be child of this
+                       (may be "" or "/" for any) */
+       char *fs_name;
+       bool root_squash;  /* default to false */
+};
+
+struct ceph_mds_cap_auth {
+       struct ceph_mds_cap_match match;
+       bool readable;
+       bool writeable;
+};
+
 /*
  * parsed info about a single inode.  pointers are into the encoded
  * on-wire structures within the mds reply message payload.
@@ -513,6 +531,9 @@ struct ceph_mds_client {
        struct rw_semaphore     pool_perm_rwsem;
        struct rb_root          pool_perm_tree;
 
+       u32                      s_cap_auths_num;
+       struct ceph_mds_cap_auth *s_cap_auths;
+
        char nodename[__NEW_UTS_LEN + 1];
 };
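
Nothing consumes the saved cap_auths yet in this patch. Going by the field
comments above (uid defaults to MDS_AUTH_UID_ANY; path may be "" or "/" to
match any path), a later client-side permission check could match an entry
against a caller roughly as follows. This is a hypothetical sketch of the
matching semantics, not code from this series:

    /* Hypothetical: does this cap match entry apply to uid/path? */
    static bool cap_match_applies(const struct ceph_mds_cap_match *m,
                                  s64 uid, const char *path)
    {
            if (m->uid != MDS_AUTH_UID_ANY && m->uid != uid)
                    return false;
            /* "" or "/" matches any path; otherwise require a prefix
             * match (trailing '/' is stripped during decode). */
            if (m->path && *m->path && strcmp(m->path, "/") != 0 &&
                strncmp(path, m->path, strlen(m->path)) != 0)
                    return false;
            return true;
    }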