void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
- BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
- if (atomic_dec_and_test(&rds_ibdev->refcount))
+ BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);
+ if (refcount_dec_and_test(&rds_ibdev->refcount))
queue_work(rds_wq, &rds_ibdev->free_work);
}
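This first hunk converts the put path: since refcount_read() returns an unsigned value, the old <= 0 sanity check becomes == 0, and atomic_dec_and_test() becomes refcount_dec_and_test(), which still returns true only for the caller dropping the last reference but is designed to warn and saturate rather than silently wrap on misuse. Below is a minimal sketch of the same put-side pattern; struct foo, foo_get and foo_put are illustrative names, not part of the patch, and the sketch frees inline where RDS defers the free to a workqueue via rds_ibdev->free_work.

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical refcounted object (not part of the patch). */
struct foo {
        refcount_t refcount;
};

/* Take an additional reference to an object that is known to be live. */
static void foo_get(struct foo *f)
{
        refcount_inc(&f->refcount);
}

/*
 * Drop one reference and free the object on the final put.  RDS queues
 * its free work on a workqueue at this point, but the shape is the same:
 * refcount_dec_and_test() returns true only once, for the last reference.
 */
static void foo_put(struct foo *f)
{
        if (refcount_dec_and_test(&f->refcount))
                kfree(f);
}

The fragment that follows, starting at return;, is from the device setup path, where the count is initialized to 1 and additional references are taken as the device is published.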
return;

spin_lock_init(&rds_ibdev->spinlock);
- atomic_set(&rds_ibdev->refcount, 1);
+ refcount_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);
- atomic_inc(&rds_ibdev->refcount);
+ refcount_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
- atomic_inc(&rds_ibdev->refcount);
+ refcount_inc(&rds_ibdev->refcount);
rds_ib_nodev_connect();
rcu_read_lock();
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (rds_ibdev)
- atomic_inc(&rds_ibdev->refcount);
+ refcount_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}
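The lookup above returns the per-device object with a reference already held, taking it under rcu_read_lock(). The patch keeps a plain refcount_inc() here; a closely related kernel idiom for RCU-protected lookups that can race with the final put is refcount_inc_not_zero(), which fails instead of resurrecting an object whose count has already reached zero. A sketch of that guarded form follows, assuming an RCU-published pointer; foo_global and foo_lookup are illustrative names, not RDS code.

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct foo {
        refcount_t refcount;
};

static struct foo __rcu *foo_global;    /* assumed RCU-published pointer */

/* Return a referenced object, or NULL if it is gone or already dying. */
static struct foo *foo_lookup(void)
{
        struct foo *f;

        rcu_read_lock();
        f = rcu_dereference(foo_global);
        if (f && !refcount_inc_not_zero(&f->refcount))
                f = NULL;               /* lost the race with the final put */
        rcu_read_unlock();

        return f;
}

Whether the plain increment is sufficient depends on what other synchronization keeps the object alive across the lookup; that context is not visible in this excerpt, so the sketch only illustrates the general pattern.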
unsigned int max_initiator_depth;
unsigned int max_responder_resources;
spinlock_t spinlock; /* protect the above */
- atomic_t refcount;
+ refcount_t refcount;
struct work_struct free_work;
int *vector_load;
};
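The header change above is the core of the conversion: the reference count field of struct rds_ib_device becomes a refcount_t. That type is provided by linux/refcount.h, so the header normally needs that include as well (not visible in this excerpt), and initialization moves from atomic_set() to refcount_set(), as the device setup hunk earlier shows. A minimal allocation-and-init sketch, with illustrative names (struct foo, foo_alloc):

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
        refcount_t refcount;            /* was atomic_t before the conversion */
};

static struct foo *foo_alloc(gfp_t gfp)
{
        struct foo *f = kzalloc(sizeof(*f), gfp);

        if (f)
                refcount_set(&f->refcount, 1);  /* creator holds the first ref */
        return f;
}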
list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
- atomic_inc(&rds_ibdev->refcount);
+ refcount_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}
spin_unlock_irq(&ib_nodev_conns_lock);
ic->rds_ibdev = rds_ibdev;
- atomic_inc(&rds_ibdev->refcount);
+ refcount_inc(&rds_ibdev->refcount);
}
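This hunk pins the device while a connection is attached to it by taking an extra reference; the matching rds_ib_remove_conn(), whose signature follows, is presumably where that reference is dropped again. The invariant is unchanged from atomic_t: every refcount_inc() on an attach or publish path must be balanced by exactly one put on the corresponding teardown path. A sketch of such a balanced pair, with hypothetical names (struct conn, conn_attach, conn_detach) reusing the foo_put() shape from the first sketch:

#include <linux/refcount.h>

struct foo {
        refcount_t refcount;
};

struct conn {
        struct foo *dev;
};

void foo_put(struct foo *f);            /* drops one reference, as sketched earlier */

/* Attach: record the backpointer and take a reference to pin the device. */
static void conn_attach(struct conn *c, struct foo *f)
{
        c->dev = f;
        refcount_inc(&f->refcount);
}

/* Detach: drop exactly the reference taken in conn_attach(). */
static void conn_detach(struct conn *c)
{
        foo_put(c->dev);
        c->dev = NULL;
}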
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)