int aa_path_name(const struct path *path, int flags, char **buffer,
const char **name, const char **info);
+#define MAX_PATH_BUFFERS 2
+
+/* preallocated per cpu buffers used for path lookups during mediation */
+struct aa_buffers {
+ char *buf[MAX_PATH_BUFFERS];
+};
+
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+
+DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
+
+#define COUNT_ARGS(X...) COUNT_ARGS_HELPER(, ##X, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define COUNT_ARGS_HELPER(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, n, X...) n
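+/* e.g. COUNT_ARGS() -> 0, COUNT_ARGS(a) -> 1, COUNT_ARGS(a, b) -> 2 */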
+#define CONCAT(X, Y) X ## Y
+#define CONCAT_AFTER(X, Y) CONCAT(X, Y)
+
+#define ASSIGN(FN, X, N) ((X) = FN(N))
+#define EVAL1(FN, X) ASSIGN(FN, X, 0) /* X = FN(0) */
+#define EVAL2(FN, X, Y...) do { ASSIGN(FN, X, 1); EVAL1(FN, Y); } while (0)
+#define EVAL(FN, X...) CONCAT_AFTER(EVAL, COUNT_ARGS(X))(FN, X)
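+/*
+ * Expansion example: EVAL(FN, x, y)
+ *   -> CONCAT_AFTER(EVAL, 2)(FN, x, y)
+ *   -> EVAL2(FN, x, y)
+ *   -> do { (x) = FN(1); (y) = FN(0); } while (0)
+ * i.e. names are assigned buffer indices in reverse argument order.
+ */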
+
+#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
+
+#ifdef CONFIG_DEBUG_PREEMPT
+#define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
+#else
+#define AA_BUG_PREEMPT_ENABLED(X) /* nop */
+#endif
+
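+/*
+ * __get_buffer - return this cpu's Nth preallocated buffer.
+ * Preemption must already be disabled so the cpu cannot change under us.
+ */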
+#define __get_buffer(N) ({ \
+ struct aa_buffers *__cpu_var; \
+ AA_BUG_PREEMPT_ENABLED("__get_buffer without preempt disabled"); \
+ __cpu_var = this_cpu_ptr(&aa_buffers); \
+ __cpu_var->buf[(N)]; })
+
+#define __get_buffers(X...) EVAL(__get_buffer, X)
+
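+/*
+ * Buffers are per cpu and never freed on put; the address-of expression
+ * generates no code and only checks at compile time that X is an lvalue.
+ */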
+#define __put_buffers(X, Y...) ((void)&(X))
+
+#define get_buffers(X...) \
+do { \
+ preempt_disable(); \
+ __get_buffers(X); \
+} while (0)
+
+#define put_buffers(X, Y...) \
+do { \
+ __put_buffers(X, Y); \
+ preempt_enable(); \
+} while (0)
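+
+/*
+ * Usage sketch (illustrative; the names below are hypothetical):
+ *
+ *	char *buffer = NULL;
+ *
+ *	get_buffers(buffer);
+ *	... use buffer, which holds aa_g_path_max bytes ...
+ *	put_buffers(buffer);
+ *
+ * Up to MAX_PATH_BUFFERS names may be requested at once, e.g.
+ * get_buffers(buf1, buf2) ... put_buffers(buf1, buf2). Preemption is
+ * disabled between get_buffers() and put_buffers(), so the code in
+ * between must not sleep.
+ */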
+
#endif /* __AA_PATH_H */
/* Flag indicating whether initialization completed */
int apparmor_initialized __initdata;
+DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
+
/*
* LSM hook functions
*/
return 0;
}
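+
+/* destroy_buffers - free the preallocated per-cpu path buffers */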
+static void destroy_buffers(void)
+{
+ u32 i, j;
+
+ for_each_possible_cpu(i) {
+ for_each_cpu_buffer(j) {
+ kfree(per_cpu(aa_buffers, i).buf[j]);
+ per_cpu(aa_buffers, i).buf[j] = NULL;
+ }
+ }
+}
+
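+/*
+ * alloc_buffers - allocate an aa_g_path_max sized buffer set for every
+ * possible cpu, preferring memory from the cpu's own node when online.
+ */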
+static int __init alloc_buffers(void)
+{
+ u32 i, j;
+
+ for_each_possible_cpu(i) {
+ for_each_cpu_buffer(j) {
+ char *buffer;
+
+ if (cpu_to_node(i) >= num_online_nodes())
+ /* fallback to kmalloc for offline nodes */
+ buffer = kmalloc(aa_g_path_max, GFP_KERNEL);
+ else
+ buffer = kmalloc_node(aa_g_path_max, GFP_KERNEL,
+ cpu_to_node(i));
+ if (!buffer) {
+ destroy_buffers();
+ return -ENOMEM;
+ }
+ per_cpu(aa_buffers, i).buf[j] = buffer;
+ }
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_SYSCTL
static int apparmor_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
}
+ error = alloc_buffers();
+ if (error) {
+ AA_ERROR("Unable to allocate work buffers\n");
+ goto buffers_out;
+ }
+
error = set_init_ctx();
if (error) {
AA_ERROR("Failed to set context on init task\n");
aa_free_root_ns();
- goto alloc_out;
+ goto buffers_out;
}
security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks));
return error;
+buffers_out:
+ destroy_buffers();
+
alloc_out:
aa_destroy_aafs();
aa_teardown_dfa_engine();