net/ax25/ax25_uid.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <net/ip.h>
#include <net/arp.h>

/*
 * Callsign/UID mapper. This is in kernel space for security on multi-amateur machines.
 */

static HLIST_HEAD(ax25_uid_list);
static DEFINE_RWLOCK(ax25_uid_lock);

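/*
 * Policy for users without an entry in the table: when non-zero, a
 * process whose uid has no mapping here needs CAP_NET_ADMIN to bind an
 * AX.25 callsign (checked by the callers of ax25_findbyuid()).
 */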
int ax25_uid_policy;

EXPORT_SYMBOL(ax25_uid_policy);

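/*
 * Look up the callsign association for @uid.  Returns the entry with
 * its refcount held (release with ax25_uid_put()), or NULL if the uid
 * has no mapping.
 */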
ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
{
        ax25_uid_assoc *ax25_uid, *res = NULL;

        read_lock(&ax25_uid_lock);
        ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                if (uid_eq(ax25_uid->uid, uid)) {
                        ax25_uid_hold(ax25_uid);
                        res = ax25_uid;
                        break;
                }
        }
        read_unlock(&ax25_uid_lock);

        return res;
}

EXPORT_SYMBOL(ax25_findbyuid);

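/*
 * Handle the SIOCAX25GETUID/SIOCAX25ADDUID/SIOCAX25DELUID socket ioctls
 * that query and modify the uid/callsign table.  Adding and deleting
 * entries requires CAP_NET_ADMIN.
 */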
int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
{
        ax25_uid_assoc *ax25_uid;
        ax25_uid_assoc *user;
        unsigned long res;

        switch (cmd) {
        case SIOCAX25GETUID:
                res = -ENOENT;
                read_lock(&ax25_uid_lock);
                ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                        if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
                                res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
                                break;
                        }
                }
                read_unlock(&ax25_uid_lock);

                return res;

        case SIOCAX25ADDUID:
        {
                kuid_t sax25_kuid;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
                if (!uid_valid(sax25_kuid))
                        return -EINVAL;
                user = ax25_findbyuid(sax25_kuid);
                if (user) {
                        ax25_uid_put(user);
                        return -EEXIST;
                }
                if (sax->sax25_uid == 0)
                        return -EINVAL;
                if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
                        return -ENOMEM;

                refcount_set(&ax25_uid->refcount, 1);
                ax25_uid->uid = sax25_kuid;
                ax25_uid->call = sax->sax25_call;

                write_lock(&ax25_uid_lock);
                hlist_add_head(&ax25_uid->uid_node, &ax25_uid_list);
                write_unlock(&ax25_uid_lock);

                return 0;
        }
        case SIOCAX25DELUID:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                ax25_uid = NULL;
                write_lock(&ax25_uid_lock);
                ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                        if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
                                break;
                }
                if (ax25_uid == NULL) {
                        write_unlock(&ax25_uid_lock);
                        return -ENOENT;
                }
                hlist_del_init(&ax25_uid->uid_node);
                ax25_uid_put(ax25_uid);
                write_unlock(&ax25_uid_lock);

                return 0;

        default:
                return -EINVAL;
        }

        return -EINVAL;  /* NOTREACHED */
}

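/*
 * Illustrative userspace sketch (not part of this file): the ioctls above
 * are issued on an AX.25 socket, with the callsign already packed into
 * AX.25 shifted-ASCII form (e.g. by libax25's ax25_aton_entry()).  Names
 * and values below are placeholders, not taken from this source.
 *
 *      int s = socket(AF_AX25, SOCK_SEQPACKET, 0);
 *      struct sockaddr_ax25 sax = { .sax25_family = AF_AX25 };
 *
 *      ax25_aton_entry("N0CALL", (char *)&sax.sax25_call);
 *      sax.sax25_uid = 1000;                      // uid to associate
 *      if (ioctl(s, SIOCAX25ADDUID, &sax) < 0)    // needs CAP_NET_ADMIN
 *              perror("SIOCAX25ADDUID");
 */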
#ifdef CONFIG_PROC_FS

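/*
 * seq_file iterators for the procfs view of the table: a "Policy:"
 * header line followed by one "<uid> <callsign>" line per entry, with
 * the read lock held for the duration of the walk.
 */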
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(ax25_uid_lock)
{
        read_lock(&ax25_uid_lock);
        return seq_hlist_start_head(&ax25_uid_list, *pos);
}

static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_hlist_next(v, &ax25_uid_list, pos);
}

static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
        __releases(ax25_uid_lock)
{
        read_unlock(&ax25_uid_lock);
}

static int ax25_uid_seq_show(struct seq_file *seq, void *v)
{
        char buf[11];

        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
        else {
                struct ax25_uid_assoc *pt;

                pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
                seq_printf(seq, "%6d %s\n",
                           from_kuid_munged(seq_user_ns(seq), pt->uid),
                           ax2asc(buf, &pt->call));
        }
        return 0;
}

const struct seq_operations ax25_uid_seqops = {
        .start = ax25_uid_seq_start,
        .next  = ax25_uid_seq_next,
        .stop  = ax25_uid_seq_stop,
        .show  = ax25_uid_seq_show,
};
#endif

/*
 * Free all memory associated with UID/Callsign structures.
 */
void __exit ax25_uid_free(void)
{
        ax25_uid_assoc *ax25_uid;

        write_lock(&ax25_uid_lock);
again:
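        /*
         * hlist_del_init() invalidates the walk, so restart it after
         * every removal until the list is empty.
         */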
        ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                hlist_del_init(&ax25_uid->uid_node);
                ax25_uid_put(ax25_uid);
                goto again;
        }
        write_unlock(&ax25_uid_lock);
}