                        res = -EINVAL;
                        break;
                }
-               ax25->rtt = (opt * HZ) / 2;
+               ax25->rtt = (opt * HZ) >> 1;
                ax25->t1 = opt * HZ;
                break;
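A side note on the substitution above (not part of the patch): opt has already been range-checked to a positive value before this point, and for non-negative integers a right shift by one and a division by two give identical results, so the change is purely cosmetic. A minimal user-space sketch, assuming HZ is 100 just for illustration:

/* Standalone illustration only (not kernel code): for unsigned values,
 * (x >> 1) equals (x / 2), so the stored rtt is unchanged by the patch.
 * HZ is assumed to be 100 purely for this example. */
#include <assert.h>
#include <stdio.h>

#define HZ 100

int main(void)
{
        unsigned long opt;

        for (opt = 1; opt <= 600; opt++) {
                unsigned long t1  = opt * HZ;   /* T1 timer in jiffies */
                unsigned long rtt = t1 >> 1;    /* initial RTT: half of T1 */

                assert(rtt == t1 / 2);          /* shift and divide agree */
        }
        printf("(opt * HZ) >> 1 == (opt * HZ) / 2 for all tested opt\n");
        return 0;
}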
#ifdef CONFIG_PROC_FS
static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
+       __acquires(ax25_list_lock)
{
        struct ax25_cb *ax25;
        struct hlist_node *node;
}
static void ax25_info_stop(struct seq_file *seq, void *v)
+       __releases(ax25_list_lock)
{
        spin_unlock_bh(&ax25_list_lock);
}
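The added __acquires()/__releases() lines are sparse lock-context annotations from <linux/compiler.h>: they expand to nothing in a normal build and only take effect when the tree is checked with `make C=1`, where sparse verifies that a function annotated as taking a lock is paired with one that drops it. A rough kernel-style sketch of the pattern with hypothetical demo_* names (not from the patch); it mirrors the seq_file ->start()/->stop() pair above:

/* Minimal kernel-style sketch, hypothetical names only.  In a normal
 * build the annotations cost nothing at run time; under sparse they
 * become context-tracking attributes so unbalanced locking is flagged. */
#include <linux/seq_file.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);              /* hypothetical lock */

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(demo_lock)                   /* held when we return */
{
        spin_lock_bh(&demo_lock);
        return NULL;                            /* real code: return first entry */
}

static void demo_seq_stop(struct seq_file *seq, void *v)
        __releases(demo_lock)                   /* dropped before returning */
{
        spin_unlock_bh(&demo_lock);
}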
                 */
                if (sk != NULL) {
                        if (atomic_read(&sk->sk_rmem_alloc) <
-                           (sk->sk_rcvbuf / 2) &&
+                           (sk->sk_rcvbuf >> 1) &&
                            (ax25->condition & AX25_COND_OWN_RX_BUSY)) {
                                ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
                                ax25->condition &= ~AX25_COND_ACK_PENDING;
#ifdef CONFIG_PROC_FS
static void *ax25_rt_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(ax25_route_lock)
{
        struct ax25_route *ax25_rt;
        int i = 1;
}
static void ax25_rt_seq_stop(struct seq_file *seq, void *v)
+       __releases(ax25_route_lock)
{
        read_unlock(&ax25_route_lock);
}
void ax25_std_heartbeat_expiry(ax25_cb *ax25)
{
-       struct sock *sk=ax25->sk;
+       struct sock *sk = ax25->sk;
        if (sk)
                bh_lock_sock(sk);
                 */
                if (sk != NULL) {
                        if (atomic_read(&sk->sk_rmem_alloc) <
-                           (sk->sk_rcvbuf / 2) &&
+                           (sk->sk_rcvbuf >> 1) &&
                            (ax25->condition & AX25_COND_OWN_RX_BUSY)) {
                                ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
                                ax25->condition &= ~AX25_COND_ACK_PENDING;
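Both timer hunks make the same substitution inside the receive-buffer test: once the memory charged to the socket's receive queue falls below half of sk_rcvbuf, the own-receiver-busy condition is cleared so the peer can be told to resume sending. A standalone sketch of just that threshold check, with hypothetical names and example numbers:

/* Standalone sketch of the "receive queue has drained" test used above
 * (hypothetical helper, not kernel code). */
#include <stdbool.h>
#include <stdio.h>

static bool rx_drained(unsigned int rmem_alloc, unsigned int rcvbuf)
{
        return rmem_alloc < (rcvbuf >> 1);      /* below half the limit */
}

int main(void)
{
        /* With a 64 KiB receive buffer the busy condition is cleared only
         * once less than 32 KiB is still queued for the application. */
        printf("%d\n", rx_drained(20000, 65536));       /* 1: drained */
        printf("%d\n", rx_drained(40000, 65536));       /* 0: still busy */
        return 0;
}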
 *      Callsign/UID mapper. This is in kernel space for security on multi-amateur machines.
 */
-HLIST_HEAD(ax25_uid_list);
+static HLIST_HEAD(ax25_uid_list);
static DEFINE_RWLOCK(ax25_uid_lock);
-int ax25_uid_policy = 0;
+int ax25_uid_policy;
EXPORT_SYMBOL(ax25_uid_policy);
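Two small cleanups in this hunk: ax25_uid_list is only referenced from ax25_uid.c, so it can become file-local, and the explicit `= 0` is dropped because statically allocated objects are zero-initialized by the language anyway (checkpatch warns about initializing globals to 0). ax25_uid_policy stays non-static since it is exported. A paraphrase of the hlist helper involved, for illustration only (not a verbatim copy of <linux/list.h>):

/* Paraphrased from <linux/list.h> purely for illustration. */
#include <stddef.h>

struct hlist_node;

struct hlist_head {
        struct hlist_node *first;
};

#define HLIST_HEAD_INIT         { .first = NULL }
#define HLIST_HEAD(name)        struct hlist_head name = HLIST_HEAD_INIT

/* So the patched declaration is simply a file-local empty list head: */
static HLIST_HEAD(ax25_uid_list);       /* struct hlist_head ax25_uid_list = { NULL }; */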
#ifdef CONFIG_PROC_FS
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(ax25_uid_lock)
{
        struct ax25_uid_assoc *pt;
        struct hlist_node *node;
}
static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
+       __releases(ax25_uid_lock)
{
        read_unlock(&ax25_uid_lock);
}