*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
*/
+
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include "fs/fs_subr.h"
-#include <acl/acl_common.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
* changed.
*/
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
- if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
+ if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zsb),
&size)) != 0)
return (error);
*aclsize = size;
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zsb),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb),
aclphys, sizeof (*aclphys))) != 0)
return (error);
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
- SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
+ SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
+/* Return the ZFS ACL version corresponding to zp's filesystem ZPL version. */
static int
zfs_acl_version_zp(znode_t *zp)
{
- return (zfs_acl_version(zp->z_zfsvfs->z_version));
+ return (zfs_acl_version(ZTOZSB(zp)->z_version));
}
zfs_acl_t *
{
zfs_acl_t *aclp;
- aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
+ aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_PUSHPAGE);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
- aclp->z_ops = zfs_acl_fuid_ops;
+ aclp->z_ops = &zfs_acl_fuid_ops;
else
- aclp->z_ops = zfs_acl_v0_ops;
+ aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
{
zfs_acl_node_t *aclnode;
- aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
+ aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_PUSHPAGE);
if (bytes) {
- aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
+ aclnode->z_acldata = kmem_alloc(bytes, KM_PUSHPAGE);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
{
zfs_acl_node_t *aclnode;
- while (aclnode = list_head(&aclp->z_acl)) {
+ while ((aclnode = list_head(&aclp->z_acl))) {
list_remove(&aclp->z_acl, aclnode);
zfs_acl_node_free(aclnode);
}
}
static boolean_t
-zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
+zfs_ace_valid(umode_t obj_mode, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
* next check inheritance level flags
*/
- if (obj_type == VDIR &&
+ if (S_ISDIR(obj_mode) &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* Make sure we don't overstep our bounds
*/
- ace_size = aclp->z_ops.ace_size(acep);
+ ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
- *iflags = aclp->z_ops.ace_flags_get(acep);
- *type = aclp->z_ops.ace_type_get(acep);
- *access_mask = aclp->z_ops.ace_mask_get(acep);
- *who = aclp->z_ops.ace_who_get(acep);
+ *iflags = aclp->z_ops->ace_flags_get(acep);
+ *type = aclp->z_ops->ace_type_get(acep);
+ *access_mask = aclp->z_ops->ace_mask_get(acep);
+ *who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((uint64_t)(uintptr_t)acep);
}
-static zfs_acl_node_t *
-zfs_acl_curr_node(zfs_acl_t *aclp)
-{
- ASSERT(aclp->z_curr_node);
- return (aclp->z_curr_node);
-}
-
/*
* Copy ACE to internal ZFS format.
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
int
-zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
+zfs_copy_ace_2_fuid(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
- aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
+ aceptr->z_fuid = zfs_fuid_create(zsb, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
- if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type,
+ if (zfs_ace_valid(obj_mode, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
- aclp->z_ops.ace_size(aceptr));
+ aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
-zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
+zfs_copy_fuid_2_ace(zfs_sb_t *zsb, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
size_t ace_size;
uint16_t entry_type;
- while (zacep = zfs_acl_next_ace(aclp, zacep,
- &who, &access_mask, &iflags, &type)) {
+ while ((zacep = zfs_acl_next_ace(aclp, zacep,
+ &who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
- acep->a_who = zfs_fuid_map_id(zfsvfs, who,
+ acep->a_who = zfs_fuid_map_id(zsb, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
}
static int
-zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
+zfs_copy_ace_2_oldace(umode_t obj_mode, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
/*
* Make sure ACE is valid
*/
- if (zfs_ace_valid(obj_type, aclp, aceptr->z_type,
+ if (zfs_ace_valid(obj_mode, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
- while (cookie = zfs_acl_next_ace(aclp, cookie, &who,
- &access_mask, &iflags, &type)) {
+ while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
+ &access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
- aclp->z_ops = zfs_acl_fuid_ops;
- VERIFY(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
- oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
+ aclp->z_ops = &zfs_acl_fuid_ops;
+ VERIFY(zfs_copy_ace_2_fuid(ZTOZSB(zp), ZTOI(zp)->i_mode,
+ aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr) == 0);
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
- aclp->z_ops.ace_mask_set(acep, access_mask);
- aclp->z_ops.ace_type_set(acep, access_type);
- aclp->z_ops.ace_flags_set(acep, entry_type);
+ aclp->z_ops->ace_mask_set(acep, access_mask);
+ aclp->z_ops->ace_type_set(acep, access_type);
+ aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
- aclp->z_ops.ace_who_set(acep, fuid);
+ aclp->z_ops->ace_who_set(acep, fuid);
}
/*
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
- while (acep = zfs_acl_next_ace(aclp, acep, &who,
- &access_mask, &iflags, &type)) {
+ while ((acep = zfs_acl_next_ace(aclp, acep, &who,
+ &access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
boolean_t will_modify)
{
zfs_acl_t *aclp;
- int aclsize;
- int acl_count;
+ int aclsize = 0;
+ int acl_count = 0;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
- error = dmu_read(zp->z_zfsvfs->z_os,
+ error = dmu_read(ZTOZSB(zp)->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
aclnode->z_size);
}
} else {
- error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zp->z_zfsvfs),
+ error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(ZTOZSB(zp)),
aclnode->z_acldata, aclnode->z_size);
}
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
- error = EIO;
+ error = SET_ERROR(EIO);
goto done;
}
int error;
zfs_acl_t *aclp;
+ if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIXACL)
+ return (0);
+
ASSERT(MUTEX_HELD(&zp->z_lock));
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0)
zp->z_mode = zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, zp->z_uid, zp->z_gid);
+
return (error);
}
+/*
+ * Compute the access masks for a trivial (mode-only equivalent) ACL.
+ *
+ * Produces the masks used to build the canonical six-ACE layout:
+ *   allow0 - owner@ ALLOW bits the owner has but group lacks while
+ *            other has them (must precede the group deny to take effect)
+ *   deny1  - owner@ DENY bits group/other have but the owner lacks
+ *   deny2  - group@ DENY bits other has but the group lacks
+ *   owner/group/everyone - the positive ALLOW masks derived from the
+ *            rwx mode bits plus the always-granted attribute/ACL reads.
+ */
+static void
+acl_trivial_access_masks(mode_t mode, uint32_t *allow0, uint32_t *deny1,
+ uint32_t *deny2, uint32_t *owner, uint32_t *group, uint32_t *everyone)
+{
+ *deny1 = *deny2 = *allow0 = *group = 0;
+
+ /* deny1: strip from owner@ what group or other may do but owner may not */
+ if (!(mode & S_IRUSR) && (mode & (S_IRGRP|S_IROTH)))
+ *deny1 |= ACE_READ_DATA;
+ if (!(mode & S_IWUSR) && (mode & (S_IWGRP|S_IWOTH)))
+ *deny1 |= ACE_WRITE_DATA;
+ if (!(mode & S_IXUSR) && (mode & (S_IXGRP|S_IXOTH)))
+ *deny1 |= ACE_EXECUTE;
+
+ /* deny2: strip from group@ what other may do but group may not */
+ if (!(mode & S_IRGRP) && (mode & S_IROTH))
+ *deny2 = ACE_READ_DATA;
+ if (!(mode & S_IWGRP) && (mode & S_IWOTH))
+ *deny2 |= ACE_WRITE_DATA;
+ if (!(mode & S_IXGRP) && (mode & S_IXOTH))
+ *deny2 |= ACE_EXECUTE;
+
+ /* allow0: re-grant to owner@ the bits deny2 would otherwise take away */
+ if ((mode & S_IRUSR) && (!(mode & S_IRGRP) && (mode & S_IROTH)))
+ *allow0 |= ACE_READ_DATA;
+ if ((mode & S_IWUSR) && (!(mode & S_IWGRP) && (mode & S_IWOTH)))
+ *allow0 |= ACE_WRITE_DATA;
+ if ((mode & S_IXUSR) && (!(mode & S_IXGRP) && (mode & S_IXOTH)))
+ *allow0 |= ACE_EXECUTE;
+
+ /* owner@ always retains control of attributes, ownership and the ACL */
+ *owner = ACE_WRITE_ATTRIBUTES|ACE_WRITE_OWNER|ACE_WRITE_ACL|
+ ACE_WRITE_NAMED_ATTRS|ACE_READ_ACL|ACE_READ_ATTRIBUTES|
+ ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE;
+ if (mode & S_IRUSR)
+ *owner |= ACE_READ_DATA;
+ if (mode & S_IWUSR)
+ *owner |= ACE_WRITE_DATA|ACE_APPEND_DATA;
+ if (mode & S_IXUSR)
+ *owner |= ACE_EXECUTE;
+
+ /* group@ and everyone@ may always read attributes and the ACL */
+ *group = ACE_READ_ACL|ACE_READ_ATTRIBUTES| ACE_READ_NAMED_ATTRS|
+ ACE_SYNCHRONIZE;
+ if (mode & S_IRGRP)
+ *group |= ACE_READ_DATA;
+ if (mode & S_IWGRP)
+ *group |= ACE_WRITE_DATA|ACE_APPEND_DATA;
+ if (mode & S_IXGRP)
+ *group |= ACE_EXECUTE;
+
+ *everyone = ACE_READ_ACL|ACE_READ_ATTRIBUTES| ACE_READ_NAMED_ATTRS|
+ ACE_SYNCHRONIZE;
+ if (mode & S_IROTH)
+ *everyone |= ACE_READ_DATA;
+ if (mode & S_IWOTH)
+ *everyone |= ACE_WRITE_DATA|ACE_APPEND_DATA;
+ if (mode & S_IXOTH)
+ *everyone |= ACE_EXECUTE;
+}
+
+/*
+ * ace_trivial_common:
+ * Determine whether an ace_t ACL is trivial, i.e. fully expressible
+ * as a plain POSIX mode.
+ *
+ * A trivial ACL is composed of only owner@, group@ and everyone@
+ * entries, carries no inheritance flags, never denies reading of
+ * basic attributes or the file's ACL, never grants delete
+ * permissions, and allows write_owner/write_acl/write_attributes/
+ * write_xattr only on the owner@ entry.
+ *
+ * The caller supplies a walk callback so the same check works for
+ * either on-disk ACE layout; it returns the next iteration cookie
+ * (0 terminates) and fills in the entry's flags, type and mask.
+ *
+ * Returns 0 if the ACL is trivial, 1 otherwise.
+ */
+static int
+ace_trivial_common(void *acep, int aclcnt,
+ uint64_t (*walk)(void *, uint64_t, int aclcnt,
+ uint16_t *, uint16_t *, uint32_t *))
+{
+ uint16_t flags;
+ uint32_t mask;
+ uint16_t type;
+ uint64_t cookie = 0;
+
+ while ((cookie = walk(acep, cookie, aclcnt, &flags, &type, &mask))) {
+ switch (flags & ACE_TYPE_FLAGS) {
+ case ACE_OWNER:
+ case ACE_GROUP|ACE_IDENTIFIER_GROUP:
+ case ACE_EVERYONE:
+ break;
+ default:
+ return (1);
+ }
+
+ /* any inheritance flag makes the ACL non-trivial */
+ if (flags & (ACE_FILE_INHERIT_ACE|
+ ACE_DIRECTORY_INHERIT_ACE|ACE_NO_PROPAGATE_INHERIT_ACE|
+ ACE_INHERIT_ONLY_ACE))
+ return (1);
+
+ /*
+ * Special check for some special bits
+ *
+ * Don't allow anybody to deny reading basic
+ * attributes or a file's ACL.
+ */
+ if ((mask & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
+ (type == ACE_ACCESS_DENIED_ACE_TYPE))
+ return (1);
+
+ /*
+ * Delete permissions are never set by default.
+ */
+ if (mask & (ACE_DELETE|ACE_DELETE_CHILD))
+ return (1);
+ /*
+ * Only allow owner@ to have
+ * write_acl/write_owner/write_attributes/write_xattr.
+ */
+ if (type == ACE_ACCESS_ALLOWED_ACE_TYPE &&
+ (!(flags & ACE_OWNER) && (mask &
+ (ACE_WRITE_OWNER|ACE_WRITE_ACL| ACE_WRITE_ATTRIBUTES|
+ ACE_WRITE_NAMED_ATTRS))))
+ return (1);
+
+ }
+
+ return (0);
+}
+
/*
* common code for setting ACLs.
*
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
zp->z_uid, zp->z_gid);
zp->z_mode = mode;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
&mode, sizeof (mode));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
/*
* Upgrade needed?
*/
- if (!zfsvfs->z_use_fuids) {
+ if (!zsb->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
- (zfsvfs->z_version >= ZPL_VERSION_FUID))
+ (zsb->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zsb),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zsb),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
zfs_acl_phys_t acl_phys;
uint64_t aoid;
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zsb),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
- error = dmu_object_free(zfsvfs->z_os, aoid, tx);
+ error = dmu_object_free(zsb->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
- aoid = dmu_object_alloc(zfsvfs->z_os,
+ aoid = dmu_object_alloc(zsb->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_MAX_BONUSLEN : 0, tx);
} else {
- (void) dmu_object_set_blocksize(zfsvfs->z_os,
+ (void) dmu_object_set_blocksize(zsb->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
- dmu_write(zfsvfs->z_os, aoid, off,
+ dmu_write(zsb->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
- error = dmu_object_free(zfsvfs->z_os,
+ error = dmu_object_free(zsb->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
}
acl_phys.z_acl_version = aclp->z_version;
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
+ SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL,
&acl_phys, sizeof (acl_phys));
}
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
-/*
- * Update access mask for prepended ACE
- *
- * This applies the "groupmask" value for aclmode property.
- */
-static void
-zfs_acl_prepend_fixup(zfs_acl_t *aclp, void *acep, void *origacep,
- mode_t mode, uint64_t owner)
-{
- int rmask, wmask, xmask;
- int user_ace;
- uint16_t aceflags;
- uint32_t origmask, acepmask;
- uint64_t fuid;
-
- aceflags = aclp->z_ops.ace_flags_get(acep);
- fuid = aclp->z_ops.ace_who_get(acep);
- origmask = aclp->z_ops.ace_mask_get(origacep);
- acepmask = aclp->z_ops.ace_mask_get(acep);
-
- user_ace = (!(aceflags &
- (ACE_OWNER|ACE_GROUP|ACE_IDENTIFIER_GROUP)));
-
- if (user_ace && (fuid == owner)) {
- rmask = S_IRUSR;
- wmask = S_IWUSR;
- xmask = S_IXUSR;
- } else {
- rmask = S_IRGRP;
- wmask = S_IWGRP;
- xmask = S_IXGRP;
- }
-
- if (origmask & ACE_READ_DATA) {
- if (mode & rmask) {
- acepmask &= ~ACE_READ_DATA;
- } else {
- acepmask |= ACE_READ_DATA;
- }
- }
-
- if (origmask & ACE_WRITE_DATA) {
- if (mode & wmask) {
- acepmask &= ~ACE_WRITE_DATA;
- } else {
- acepmask |= ACE_WRITE_DATA;
- }
- }
-
- if (origmask & ACE_APPEND_DATA) {
- if (mode & wmask) {
- acepmask &= ~ACE_APPEND_DATA;
- } else {
- acepmask |= ACE_APPEND_DATA;
- }
- }
-
- if (origmask & ACE_EXECUTE) {
- if (mode & xmask) {
- acepmask &= ~ACE_EXECUTE;
- } else {
- acepmask |= ACE_EXECUTE;
- }
- }
- aclp->z_ops.ace_mask_set(acep, acepmask);
-}
-
static void
-zfs_acl_chmod(zfsvfs_t *zfsvfs, uint64_t mode, zfs_acl_t *aclp)
+zfs_acl_chmod(zfs_sb_t *zsb, uint64_t mode, zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
- int entry_type;
+ int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
- size_t abstract_size = aclp->z_ops.ace_abstract_size();
- void *zacep;
- uint32_t owner, group, everyone;
+ size_t abstract_size = aclp->z_ops->ace_abstract_size();
+ void *zacep;
+ uint32_t owner, group, everyone;
uint32_t deny1, deny2, allow0;
new_count = new_bytes = 0;
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
- } if (deny1) {
+ }
+ if (deny1) {
zfs_set_ace(aclp, zacep, deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
- while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
- &iflags, &type)) {
+ while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
+ &iflags, &type))) {
uint16_t inherit_flags;
entry_type = (iflags & ACE_TYPE_FLAGS);
* Limit permissions to be no greater than
* group permissions
*/
- if (zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) {
+ if (zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) {
if (!(mode & S_IRGRP))
access_mask &= ~ACE_READ_DATA;
if (!(mode & S_IWGRP))
}
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
- ace_size = aclp->z_ops.ace_size(acep);
+ ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
mutex_enter(&zp->z_lock);
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
- zfs_acl_chmod(zp->z_zfsvfs, mode, *aclp);
+ zfs_acl_chmod(ZTOZSB(zp), mode, *aclp);
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
ASSERT(*aclp);
* strip off write_owner and write_acl
*/
static void
-zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep)
+zfs_restricted_update(zfs_sb_t *zsb, zfs_acl_t *aclp, void *acep)
{
- uint32_t mask = aclp->z_ops.ace_mask_get(acep);
+ uint32_t mask = aclp->z_ops->ace_mask_get(acep);
- if ((zfsvfs->z_acl_inherit == ZFS_ACL_RESTRICTED) &&
- (aclp->z_ops.ace_type_get(acep) == ALLOW)) {
+ if ((zsb->z_acl_inherit == ZFS_ACL_RESTRICTED) &&
+ (aclp->z_ops->ace_type_get(acep) == ALLOW)) {
mask &= ~RESTRICTED_CLEAR;
- aclp->z_ops.ace_mask_set(acep, mask);
+ aclp->z_ops->ace_mask_set(acep, mask);
}
}
* Should ACE be inherited?
*/
static int
-zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags)
+zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
- if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
+ if (S_ISDIR(obj_mode) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
- return (!((vtype == VDIR) &&
+ return (!(S_ISDIR(obj_mode) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
-zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
+zfs_acl_inherit(zfs_sb_t *zsb, umode_t obj_mode, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
- boolean_t vdir = vtype == VDIR;
- boolean_t vreg = vtype == VREG;
+ boolean_t vdir = S_ISDIR(obj_mode);
+ boolean_t vreg = S_ISREG(obj_mode);
boolean_t passthrough, passthrough_x, noallow;
passthrough_x =
- zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X;
+ zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH_X;
passthrough = passthrough_x ||
- zfsvfs->z_acl_inherit == ZFS_ACL_PASSTHROUGH;
+ zsb->z_acl_inherit == ZFS_ACL_PASSTHROUGH;
noallow =
- zfsvfs->z_acl_inherit == ZFS_ACL_NOALLOW;
+ zsb->z_acl_inherit == ZFS_ACL_NOALLOW;
*need_chmod = B_TRUE;
pacep = NULL;
aclp = zfs_acl_alloc(paclp->z_version);
- if (zfsvfs->z_acl_inherit == ZFS_ACL_DISCARD || vtype == VLNK)
+ if (zsb->z_acl_inherit == ZFS_ACL_DISCARD || S_ISLNK(obj_mode))
return (aclp);
- while (pacep = zfs_acl_next_ace(paclp, pacep, &who,
- &access_mask, &iflags, &type)) {
+ while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
+ &access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
if (noallow && type == ALLOW)
continue;
- ace_size = aclp->z_ops.ace_size(pacep);
+ ace_size = aclp->z_ops->ace_size(pacep);
- if (!zfs_ace_can_use(vtype, iflags))
+ if (!zfs_ace_can_use(obj_mode, iflags))
continue;
/*
/*
* Copy special opaque data if any
*/
- if ((data1sz = paclp->z_ops.ace_data(pacep, &data1)) != 0) {
- VERIFY((data2sz = aclp->z_ops.ace_data(acep,
+ if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
+ VERIFY((data2sz = aclp->z_ops->ace_data(acep,
&data2)) == data1sz);
bcopy(data1, data2, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
- newflags = aclp->z_ops.ace_flags_get(acep);
+ newflags = aclp->z_ops->ace_flags_get(acep);
if (vdir)
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((iflags & ACE_NO_PROPAGATE_INHERIT_ACE) || !vdir) {
newflags &= ~ALL_INHERIT;
- aclp->z_ops.ace_flags_set(acep,
+ aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
- zfs_restricted_update(zfsvfs, aclp, acep);
+ zfs_restricted_update(zsb, aclp, acep);
continue;
}
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
- aclp->z_ops.ace_flags_set(acep,
+ aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
- aclp->z_ops.ace_flags_set(acep,
+ aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
- zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(dzp);
zfs_acl_t *paclp;
+#ifdef HAVE_KSID
gid_t gid;
+#endif /* HAVE_KSID */
boolean_t need_chmod = B_TRUE;
boolean_t inherited = B_FALSE;
bzero(acl_ids, sizeof (zfs_acl_ids_t));
- acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
+ acl_ids->z_mode = vap->va_mode;
if (vsecp)
- if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr,
- &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
+ if ((error = zfs_vsec_2_aclp(zsb, vap->va_mode, vsecp,
+ cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
+
+ acl_ids->z_fuid = vap->va_uid;
+ acl_ids->z_fgid = vap->va_gid;
+#ifdef HAVE_KSID
/*
* Determine uid and gid.
*/
- if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
- ((flag & IS_XATTR) && (vap->va_type == VDIR))) {
- acl_ids->z_fuid = zfs_fuid_create(zfsvfs,
- (uint64_t)vap->va_uid, cr,
- ZFS_OWNER, &acl_ids->z_fuidp);
- acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
- (uint64_t)vap->va_gid, cr,
- ZFS_GROUP, &acl_ids->z_fuidp);
+ if ((flag & IS_ROOT_NODE) || zsb->z_replay ||
+ ((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) {
+ acl_ids->z_fuid = zfs_fuid_create(zsb, (uint64_t)vap->va_uid,
+ cr, ZFS_OWNER, &acl_ids->z_fuidp);
+ acl_ids->z_fgid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
+ cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
- acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
+ acl_ids->z_fuid = zfs_fuid_create_cred(zsb, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
- acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
+ acl_ids->z_fgid = zfs_fuid_create(zsb,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
uint32_t rid;
acl_ids->z_fgid = dzp->z_gid;
- gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
+ gid = zfs_fuid_map_id(zsb, acl_ids->z_fgid,
cr, ZFS_GROUP);
- if (zfsvfs->z_use_fuids &&
+ if (zsb->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain = zfs_fuid_idx_domain(
- &zfsvfs->z_fuid_idx,
+ &zsb->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
acl_ids->z_fgid, ZFS_GROUP);
}
} else {
- acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
+ acl_ids->z_fgid = zfs_fuid_create_cred(zsb,
ZFS_GROUP, cr, &acl_ids->z_fuidp);
gid = crgetgid(cr);
}
}
}
+#endif /* HAVE_KSID */
/*
* If we're creating a directory, and the parent directory has the
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
- (vap->va_type == VDIR)) {
+ (S_ISDIR(vap->va_mode))) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
mutex_enter(&dzp->z_lock);
- if (!(flag & IS_ROOT_NODE) && (ZTOV(dzp)->v_type == VDIR &&
+ if (!(flag & IS_ROOT_NODE) && (S_ISDIR(ZTOI(dzp)->i_mode) &&
(dzp->z_pflags & ZFS_INHERIT_ACE)) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
- acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
- vap->va_type, paclp, acl_ids->z_mode, &need_chmod);
+ acl_ids->z_aclp = zfs_acl_inherit(zsb,
+ vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
mutex_exit(&dzp->z_lock);
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
- acl_ids->z_aclp->z_hints |= (vap->va_type == VDIR) ?
+ acl_ids->z_aclp->z_hints |= S_ISDIR(vap->va_mode) ?
ZFS_ACL_AUTO_INHERIT : 0;
- zfs_acl_chmod(zfsvfs, acl_ids->z_mode, acl_ids->z_aclp);
+ zfs_acl_chmod(zsb, acl_ids->z_mode, acl_ids->z_aclp);
}
}
}
+/*
+ * Return B_TRUE if charging the new object's owner (z_fuid) or group
+ * (z_fgid) would push either over its quota.
+ */
boolean_t
-zfs_acl_ids_overquota(zfsvfs_t *zfsvfs, zfs_acl_ids_t *acl_ids)
+zfs_acl_ids_overquota(zfs_sb_t *zsb, zfs_acl_ids_t *acl_ids)
{
- return (zfs_fuid_overquota(zfsvfs, B_FALSE, acl_ids->z_fuid) ||
- zfs_fuid_overquota(zfsvfs, B_TRUE, acl_ids->z_fgid));
+ return (zfs_fuid_overquota(zsb, B_FALSE, acl_ids->z_fuid) ||
+ zfs_fuid_overquota(zsb, B_TRUE, acl_ids->z_fgid));
}
/*
- * Retrieve a files ACL
+ * Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
- return (ENOSYS);
+ return (SET_ERROR(ENOSYS));
- if (error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr))
+ if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
mutex_enter(&zp->z_acl_lock);
uint32_t access_mask;
uint16_t type, iflags;
- while (zacep = zfs_acl_next_ace(aclp, zacep,
- &who, &access_mask, &iflags, &type)) {
+ while ((zacep = zfs_acl_next_ace(aclp, zacep,
+ &who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
- zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr,
+ zfs_copy_fuid_2_ace(ZTOZSB(zp), aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
}
int
-zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type,
+zfs_vsec_2_aclp(zfs_sb_t *zsb, umode_t obj_mode,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
- aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
+ aclp = zfs_acl_alloc(zfs_acl_version(zsb->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
- if ((error = zfs_copy_ace_2_oldace(obj_type, aclp,
+ if ((error = zfs_copy_ace_2_oldace(obj_mode, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
return (error);
}
} else {
- if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp,
+ if ((error = zfs_copy_ace_2_fuid(zsb, obj_mode, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
}
/*
- * Set a files ACL
+ * Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- zilog_t *zilog = zfsvfs->z_log;
+ zfs_sb_t *zsb = ZTOZSB(zp);
+ zilog_t *zilog = zsb->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
uint64_t acl_obj;
if (mask == 0)
- return (ENOSYS);
+ return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
- return (EPERM);
+ return (SET_ERROR(EPERM));
- if (error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))
+ if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
- error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
+ error = zfs_vsec_2_aclp(zsb, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
- tx = dmu_tx_create(zfsvfs->z_os);
+ tx = dmu_tx_create(zsb->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
- fuid_dirtied = zfsvfs->z_fuid_dirty;
+ fuid_dirtied = zsb->z_fuid_dirty;
if (fuid_dirtied)
- zfs_fuid_txhold(zfsvfs, tx);
+ zfs_fuid_txhold(zsb, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
- if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
+ if (zsb->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
- zfs_fuid_sync(zfsvfs, tx);
+ zfs_fuid_sync(zsb, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
-done:
+
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
- if ((v4_mode & WRITE_MASK) &&
- (zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
- (!IS_DEVVP(ZTOV(zp)) ||
- (IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) {
- return (EROFS);
+ if ((v4_mode & WRITE_MASK) && (zfs_is_readonly(ZTOZSB(zp))) &&
+ (!S_ISDEV(ZTOI(zp)->i_mode) ||
+ (S_ISDEV(ZTOI(zp)->i_mode) && (v4_mode & WRITE_MASK_ATTRS)))) {
+ return (SET_ERROR(EROFS));
}
/*
* Only check for READONLY on non-directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
- (((ZTOV(zp)->v_type != VDIR) &&
+ ((!S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & (ZFS_READONLY | ZFS_IMMUTABLE))) ||
- (ZTOV(zp)->v_type == VDIR &&
+ (S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_IMMUTABLE)))) {
- return (EPERM);
+ return (SET_ERROR(EPERM));
}
if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
- return (EPERM);
+ return (SET_ERROR(EPERM));
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
- return (EACCES);
+ return (SET_ERROR(EACCES));
}
return (0);
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
- uint64_t who;
+ uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
ASSERT(zp->z_acl_cached);
- while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
- &iflags, &type)) {
+ while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
+ &iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
- if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE))
+ if (S_ISDIR(ZTOI(zp)->i_mode) &&
+ (iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
who = gowner;
/*FALLTHROUGH*/
case ACE_IDENTIFIER_GROUP:
- checkit = zfs_groupmember(zfsvfs, who, cr);
+ checkit = zfs_groupmember(zsb, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
if (entry_type == 0) {
uid_t newid;
- newid = zfs_fuid_map_id(zfsvfs, who, cr,
+ newid = zfs_fuid_map_id(zsb, who, cr,
ZFS_ACE_USER);
if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
uid == newid)
break;
} else {
mutex_exit(&zp->z_acl_lock);
- return (EIO);
+ return (SET_ERROR(EIO));
}
}
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
- return (EACCES);
+ return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
- owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
- return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0);
+ owner = zfs_fuid_map_id(ZTOZSB(zp), zp->z_uid, cr, ZFS_OWNER);
+ return (secpolicy_vnode_any_access(cr, ZTOI(zp), owner) == 0);
}
return (B_TRUE);
}
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ zfs_sb_t *zsb = ZTOZSB(zp);
int err;
*working_mode = v4_mode;
/*
* Short circuit empty requests
*/
- if (v4_mode == 0 || zfsvfs->z_replay) {
+ if (v4_mode == 0 || zsb->z_replay) {
*working_mode = 0;
return (0);
}
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
- return (EACCES);
+ return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
int error;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
- return (EACCES);
+ return (SET_ERROR(EACCES));
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
- (ZTOV(zdp)->v_type == VDIR));
+ (S_ISDIR(ZTOI(zdp)->i_mode)));
if (is_attr)
goto slow;
slow:
DTRACE_PROBE(zfs__fastpath__execute__access__miss);
- ZFS_ENTER(zdp->z_zfsvfs);
+ ZFS_ENTER(ZTOZSB(zdp));
error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr);
- ZFS_EXIT(zdp->z_zfsvfs);
+ ZFS_EXIT(ZTOZSB(zdp));
return (error);
}
/*
* Determine whether Access should be granted/denied.
+ *
* The least priv subsytem is always consulted as a basic privilege
* can define any form of access.
*/
{
uint32_t working_mode;
int error;
- int is_attr;
- boolean_t check_privs;
- znode_t *xzp;
- znode_t *check_zp = zp;
+ boolean_t check_privs;
+ znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
- is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR));
-
/*
* If attribute then validate against base file
*/
- if (is_attr) {
+ if ((zp->z_pflags & ZFS_XATTR) && S_ISDIR(ZTOI(zp)->i_mode)) {
uint64_t parent;
- if ((error = sa_lookup(zp->z_sa_hdl,
- SA_ZPL_PARENT(zp->z_zfsvfs), &parent,
- sizeof (parent))) != 0)
- return (error);
+ rw_enter(&zp->z_xattr_lock, RW_READER);
+ if (zp->z_xattr_parent) {
+ check_zp = zp->z_xattr_parent;
+ rw_exit(&zp->z_xattr_lock);
- if ((error = zfs_zget(zp->z_zfsvfs,
- parent, &xzp)) != 0) {
- return (error);
- }
+ /*
+ * Verify a lookup yields the same znode.
+ */
+ ASSERT3S(sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(
+ ZTOZSB(zp)), &parent, sizeof (parent)), ==, 0);
+ ASSERT3U(check_zp->z_id, ==, parent);
+ } else {
+ rw_exit(&zp->z_xattr_lock);
+
+ error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(
+ ZTOZSB(zp)), &parent, sizeof (parent));
+ if (error)
+ return (error);
- check_zp = xzp;
+ /*
+ * Cache the lookup on the parent file znode as
+ * zp->z_xattr_parent and hold a reference. This
+ * effectively pins the parent in memory until all
+ * child xattr znodes have been destroyed and
+ * release their references in zfs_inode_destroy().
+ */
+ error = zfs_zget(ZTOZSB(zp), parent, &check_zp);
+ if (error)
+ return (error);
+
+ rw_enter(&zp->z_xattr_lock, RW_WRITER);
+ if (zp->z_xattr_parent == NULL)
+ zp->z_xattr_parent = check_zp;
+ rw_exit(&zp->z_xattr_lock);
+ }
/*
* fixup mode to map to xattr perms
}
}
- owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
+ owner = zfs_fuid_map_id(ZTOZSB(zp), zp->z_uid, cr, ZFS_OWNER);
/*
- * Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC
- * in needed_bits. Map the bits mapped by working_mode (currently
- * missing) in missing_bits.
+ * Map the bits required to the standard inode flags
+ * S_IRUSR|S_IWUSR|S_IXUSR in the needed_bits. Map the bits
+ * mapped by working_mode (currently missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
- needed_bits |= VREAD;
+ needed_bits |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
- needed_bits |= VWRITE;
+ needed_bits |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
- needed_bits |= VEXEC;
+ needed_bits |= S_IXUSR;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
- if (is_attr)
- VN_RELE(ZTOV(xzp));
- return (secpolicy_vnode_access2(cr, ZTOV(zp), owner,
+ return (secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
- if (is_attr)
- VN_RELE(ZTOV(xzp));
return (error);
}
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
- checkmode |= VREAD;
+ checkmode |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
- checkmode |= VWRITE;
+ checkmode |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
- checkmode |= VEXEC;
+ checkmode |= S_IXUSR;
- error = secpolicy_vnode_access2(cr, ZTOV(check_zp), owner,
+ error = secpolicy_vnode_access2(cr, ZTOI(check_zp), owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
- error = EACCES;
+ error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
- error = secpolicy_vnode_access2(cr, ZTOV(zp), owner,
+ error = secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits);
}
-
- if (is_attr)
- VN_RELE(ZTOV(xzp));
-
return (error);
}
/*
- * Translate traditional unix VREAD/VWRITE/VEXEC mode into
+ * Translate traditional unix S_IRUSR/S_IWUSR/S_IXUSR mode into
* native ACL format and call zfs_zaccess()
*/
int
int error;
uid_t downer;
- downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER);
+ downer = zfs_fuid_map_id(ZTOZSB(dzp), dzp->z_uid, cr, ZFS_OWNER);
- error = secpolicy_vnode_access2(cr, ZTOV(dzp),
- downer, available_perms, VWRITE|VEXEC);
+ error = secpolicy_vnode_access2(cr, ZTOI(dzp),
+ downer, available_perms, S_IWUSR|S_IXUSR);
if (error == 0)
error = zfs_sticky_remove_access(dzp, zp, cr);
* Determine whether access should be granted/denied, without
* consulting least priv subsystem.
*
- *
* The following chart is the recommended NFSv4 enforcement for
* ability to delete an object.
*
*/
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
- return (EPERM);
+ return (SET_ERROR(EPERM));
/*
* First row
* Fourth row
*/
- available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : VWRITE;
- available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : VEXEC;
+ available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : S_IWUSR;
+ available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : S_IXUSR;
return (zfs_delete_final_check(zp, dzp, available_perms, cr));
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
- return (EACCES);
+ return (SET_ERROR(EACCES));
- add_perm = (ZTOV(szp)->v_type == VDIR) ?
+ add_perm = S_ISDIR(ZTOI(szp)->i_mode) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* If that succeeds then check for add_file/add_subdir permissions
*/
- if (error = zfs_zaccess_delete(sdzp, szp, cr))
+ if ((error = zfs_zaccess_delete(sdzp, szp, cr)))
return (error);
/*
* If we have a tzp, see if we can delete it?
*/
if (tzp) {
- if (error = zfs_zaccess_delete(tdzp, tzp, cr))
+ if ((error = zfs_zaccess_delete(tdzp, tzp, cr)))
return (error);
}