diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 1cadc9eefa21a47e783160b874dbd2ce02f8f05f..166177be22e51328768a3bf619e903ea5b9f5fb3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-               struct se_device *dev);
+               struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -457,8 +458,20 @@ static void target_complete_nacl(struct kref *kref)
 {
        struct se_node_acl *nacl = container_of(kref,
                                struct se_node_acl, acl_kref);
+       struct se_portal_group *se_tpg = nacl->se_tpg;
 
-       complete(&nacl->acl_free_comp);
+       if (!nacl->dynamic_stop) {
+               complete(&nacl->acl_free_comp);
+               return;
+       }
+
+       mutex_lock(&se_tpg->acl_node_mutex);
+       list_del(&nacl->acl_list);
+       mutex_unlock(&se_tpg->acl_node_mutex);
+
+       core_tpg_wait_for_nacl_pr_ref(nacl);
+       core_free_device_list_for_node(nacl, se_tpg);
+       kfree(nacl);
 }
 
 void target_put_nacl(struct se_node_acl *nacl)
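
For orientation: target_put_nacl() (body elided by this hunk) is the only path into target_complete_nacl(), via the final kref put. A minimal sketch, assuming the usual kref idiom and the v4.10-era field names:

void target_put_nacl(struct se_node_acl *nacl)
{
        /* On the final put, kref_put() invokes target_complete_nacl(), which
         * either wakes the configfs removal context (explicit ACL) or, with
         * ->dynamic_stop set, tears the dynamic ACL down directly. */
        kref_put(&nacl->acl_kref, target_complete_nacl);
}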
@@ -499,12 +512,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
 void transport_free_session(struct se_session *se_sess)
 {
        struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
        /*
         * Drop the se_node_acl->acl_kref obtained from within
         * core_tpg_get_initiator_node_acl().
         */
        if (se_nacl) {
+               struct se_portal_group *se_tpg = se_nacl->se_tpg;
+               const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+               unsigned long flags;
+
                se_sess->se_node_acl = NULL;
+
+               /*
+                * Also determine if we need to drop the extra ->acl_kref if
+                * the se_node_acl had been dynamically generated and the
+                * endpoint is not caching dynamic ACLs.
+                */
+               mutex_lock(&se_tpg->acl_node_mutex);
+               if (se_nacl->dynamic_node_acl &&
+                   !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+                       spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+                       if (list_empty(&se_nacl->acl_sess_list))
+                               se_nacl->dynamic_stop = true;
+                       spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+                       if (se_nacl->dynamic_stop)
+                               list_del(&se_nacl->acl_list);
+               }
+               mutex_unlock(&se_tpg->acl_node_mutex);
+
+               if (se_nacl->dynamic_stop)
+                       target_put_nacl(se_nacl);
+
                target_put_nacl(se_nacl);
        }
        if (se_sess->sess_cmd_map) {
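
The double target_put_nacl() above is the crux of the fix: one put balances the session's reference, and the extra dynamic_stop put drops the ACL's initial reference so the final release happens from kref context. A hedged ledger for a single-session dynamic ACL on an endpoint without demo-mode caching (counts are illustrative):

/*
 *   dynamic ACL allocation                   acl_kref = 1  (initial)
 *   core_tpg_get_initiator_node_acl()        acl_kref = 2  (session's ref)
 *
 *   transport_free_session():
 *     target_put_nacl()  [dynamic_stop]      acl_kref = 1
 *     target_put_nacl()  [session's ref]     acl_kref = 0
 *       -> target_complete_nacl() sees ->dynamic_stop and frees the ACL
 */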
@@ -518,16 +558,12 @@ EXPORT_SYMBOL(transport_free_session);
 void transport_deregister_session(struct se_session *se_sess)
 {
        struct se_portal_group *se_tpg = se_sess->se_tpg;
-       const struct target_core_fabric_ops *se_tfo;
-       struct se_node_acl *se_nacl;
        unsigned long flags;
-       bool drop_nacl = false;
 
        if (!se_tpg) {
                transport_free_session(se_sess);
                return;
        }
-       se_tfo = se_tpg->se_tpg_tfo;
 
        spin_lock_irqsave(&se_tpg->session_lock, flags);
        list_del(&se_sess->sess_list);
@@ -535,33 +571,15 @@ void transport_deregister_session(struct se_session *se_sess)
        se_sess->fabric_sess_ptr = NULL;
        spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 
-       /*
-        * Determine if we need to do extra work for this initiator node's
-        * struct se_node_acl if it had been previously dynamically generated.
-        */
-       se_nacl = se_sess->se_node_acl;
-
-       mutex_lock(&se_tpg->acl_node_mutex);
-       if (se_nacl && se_nacl->dynamic_node_acl) {
-               if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
-                       list_del(&se_nacl->acl_list);
-                       drop_nacl = true;
-               }
-       }
-       mutex_unlock(&se_tpg->acl_node_mutex);
-
-       if (drop_nacl) {
-               core_tpg_wait_for_nacl_pr_ref(se_nacl);
-               core_free_device_list_for_node(se_nacl, se_tpg);
-               se_sess->se_node_acl = NULL;
-               kfree(se_nacl);
-       }
        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
        /*
         * If the last kref is dropping now for an explicit NodeACL, wake the
         * sleeping ->acl_free_comp caller, i.e. the configfs se_node_acl
         * ->acl_group removal context, from within transport_free_session().
+        *
+        * For dynamic ACLs, target_put_nacl() drives target_complete_nacl()
+        * to release all remaining generate_node_acl=1 created ACL resources.
         */
 
        transport_free_session(se_sess);
@@ -811,7 +829,8 @@ void target_qf_do_work(struct work_struct *work)
 
                if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
                        transport_write_pending_qf(cmd);
-               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+                        cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
                        transport_complete_qf(cmd);
        }
 }
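
For context, the head of target_qf_do_work() (unchanged, so elided by this hunk) drains the device's queue-full list before the per-command dispatch shown above. A sketch reconstructed from the v4.10-era code, so treat details as approximate:

void target_qf_do_work(struct work_struct *work)
{
        struct se_device *dev = container_of(work, struct se_device,
                                        qf_work_queue);
        LIST_HEAD(qf_cmd_list);
        struct se_cmd *cmd, *cmd_tmp;

        /* Splice the pending commands off dev->qf_cmd_list under
         * qf_cmd_lock, then retry each one outside the lock. */
        spin_lock_irq(&dev->qf_cmd_lock);
        list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
        spin_unlock_irq(&dev->qf_cmd_lock);

        list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                atomic_dec_mb(&dev->dev_qf_count);
                /* ... dispatch on cmd->t_state as in the hunk above ... */
        }
}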
@@ -1724,7 +1743,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                }
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                goto check_stop;
        default:
@@ -1735,7 +1754,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        }
 
        ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
 check_stop:
@@ -1744,8 +1763,7 @@ check_stop:
        return;
 
 queue_full:
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1984,13 +2002,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
        int ret = 0;
 
        transport_complete_task_attr(cmd);
+       /*
+        * If a fabric driver ->write_pending() or ->queue_data_in() callback
+        * has returned neither -ENOMEM nor -EAGAIN, assume it is fatal and
+        * the same callback should not be retried.  Return CHECK_CONDITION
+        * if a scsi_status is not already set.
+        *
+        * If a fabric driver ->queue_status() has returned non-zero, always
+        * keep retrying, no matter what.
+        */
+       if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+               if (cmd->scsi_status)
+                       goto queue_status;
 
-       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-               trace_target_cmd_complete(cmd);
-               ret = cmd->se_tfo->queue_status(cmd);
-               goto out;
+               cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+               cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+               cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
+               translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+               goto queue_status;
        }
 
+       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+               goto queue_status;
+
        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
                if (cmd->scsi_status)
@@ -2014,19 +2048,33 @@ queue_status:
                break;
        }
 
-out:
        if (ret < 0) {
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
                return;
        }
        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-       struct se_cmd *cmd,
-       struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+                                       int err, bool write_pending)
 {
+       /*
+        * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+        * ->queue_data_in() callbacks from new process context.
+        *
+        * Otherwise for other errors, transport_complete_qf() will send
+        * CHECK_CONDITION via ->queue_status() instead of attempting to
+        * retry associated fabric driver data-transfer callbacks.
+        */
+       if (err == -EAGAIN || err == -ENOMEM) {
+               cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+                                                TRANSPORT_COMPLETE_QF_OK;
+       } else {
+               pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+               cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+       }
+
        spin_lock_irq(&dev->qf_cmd_lock);
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc_mb(&dev->dev_qf_count);
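
From a fabric driver's perspective, the new contract is that any non-zero return from ->write_pending() or ->queue_data_in() is requeued, but only -EAGAIN/-ENOMEM mean "retry the same callback". A hedged sketch of a hypothetical fabric ->queue_data_in() (hw_queue_full() and link_is_down() are illustrative stand-ins, not real APIs):

static bool hw_queue_full(void);        /* hypothetical */
static bool link_is_down(void);         /* hypothetical */

static int example_queue_data_in(struct se_cmd *cmd)
{
        if (hw_queue_full())
                return -EAGAIN;         /* transient: requeued as
                                         * TRANSPORT_COMPLETE_QF_OK, and
                                         * target_qf_do_work() retries
                                         * ->queue_data_in() */
        if (link_is_down())
                return -EIO;            /* fatal: requeued as
                                         * TRANSPORT_COMPLETE_QF_ERR, and
                                         * transport_complete_qf() answers
                                         * with CHECK_CONDITION instead */
        return 0;                       /* response queued to the initiator */
}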
@@ -2090,7 +2138,7 @@ static void target_complete_ok_work(struct work_struct *work)
                WARN_ON(!cmd->scsi_status);
                ret = transport_send_check_condition_and_sense(
                                        cmd, 0, 1);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
 
                transport_lun_remove_cmd(cmd);
@@ -2116,7 +2164,7 @@ static void target_complete_ok_work(struct work_struct *work)
                } else if (rc) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                rc, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2141,7 +2189,7 @@ queue_rsp:
                if (target_read_prot_action(cmd)) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                cmd->pi_err, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2151,7 +2199,7 @@ queue_rsp:
 
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_data_in(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        case DMA_TO_DEVICE:
@@ -2164,7 +2212,7 @@ queue_rsp:
                        atomic_long_add(cmd->data_length,
                                        &cmd->se_lun->lun_stats.tx_data_octets);
                        ret = cmd->se_tfo->queue_data_in(cmd);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
                        break;
                }
@@ -2173,7 +2221,7 @@ queue_rsp:
 queue_status:
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        default:
@@ -2187,8 +2235,8 @@ queue_status:
 queue_full:
        pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
                " data_direction: %d\n", cmd, cmd->data_direction);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
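
The bare `if (ret)` checks above work because transport_send_check_condition_and_sense() ultimately returns the fabric's ->queue_status() result. A hedged sketch of its tail, assuming the v4.10-era flow:

        /* tail of transport_send_check_condition_and_sense(), approximately: */
        if (!from_transport) {
                ret = translate_sense_reason(cmd, reason);
                if (ret)
                        return ret;
        }

        trace_target_cmd_complete(cmd);
        return cmd->se_tfo->queue_status(cmd);  /* non-zero -> queue-full path */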
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2438,18 +2486,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
        transport_cmd_check_stop(cmd, false, true);
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
-       /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-       WARN_ON(ret);
-
-       return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return 0;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2459,10 +2503,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        int ret;
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM) {
+       if (ret) {
                pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
                         cmd);
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        }
 }
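
Both write-path call sites above pass write_pending=true, so a transient error resumes at ->write_pending() rather than at response delivery. A hypothetical recap helper (not part of the patch) summarizing the mapping transport_handle_queue_full() implements:

static inline int qf_resume_state(int err, bool write_pending)
{
        /* Transient errors are requeued to retry the callback that failed. */
        if (err == -EAGAIN || err == -ENOMEM)
                return write_pending ? TRANSPORT_COMPLETE_QF_WP :
                                       TRANSPORT_COMPLETE_QF_OK;
        /* Anything else gives up on the data transfer; transport_complete_qf()
         * answers with CHECK_CONDITION via ->queue_status() instead. */
        return TRANSPORT_COMPLETE_QF_ERR;
}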
 
@@ -2689,10 +2733,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
+static void target_lun_confirm(struct percpu_ref *ref)
+{
+       struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+       complete(&lun->lun_ref_comp);
+}
+
 void transport_clear_lun_ref(struct se_lun *lun)
 {
-       percpu_ref_kill(&lun->lun_ref);
+       /*
+        * Mark the percpu_ref as DEAD, switch to atomic_t mode, drop
+        * the initial reference, and schedule the confirm callback to
+        * run after one full RCU grace period has completed.
+        */
+       percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
+       /*
+        * The first completion waits for percpu_ref_switch_to_atomic_rcu()
+        * to call target_lun_confirm after lun->lun_ref has been marked
+        * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
+        * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
+        * fails for all new incoming I/O.
+        */
        wait_for_completion(&lun->lun_ref_comp);
+       /*
+        * The second completion waits for percpu_ref_put_many() to
+        * invoke ->release() after lun->lun_ref has switched to
+        * atomic_t mode, and lun->lun_ref.count has reached zero.
+        *
+        * At this point all target-core lun->lun_ref references have
+        * been dropped via transport_lun_remove_cmd(), and it's safe
+        * to proceed with the remaining LUN shutdown.
+        */
+       wait_for_completion(&lun->lun_shutdown_comp);
 }
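
The second wait above pairs with lun->lun_ref's ->release() callback, which lives in target_core_tpg.c and is not part of this blobdiff. A sketch assuming the companion change points it at lun_shutdown_comp:

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        /* Fires only after lun_ref has switched to atomic mode and the
         * last transport_lun_remove_cmd() reference has been dropped. */
        complete(&lun->lun_shutdown_comp);
}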
 
 static bool
@@ -2975,6 +3048,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
 {
+       int ret;
+
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());
 
@@ -2998,7 +3073,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        trace_target_cmd_complete(cmd);
 
        spin_unlock_irq(&cmd->t_state_lock);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
        spin_lock_irq(&cmd->t_state_lock);
 
        return 1;
@@ -3019,6 +3096,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
        unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3054,7 +3132,9 @@ send_abort:
                 cmd->t_task_cdb[0], cmd->tag);
 
        trace_target_cmd_complete(cmd);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
@@ -3110,7 +3190,6 @@ static void target_tmr_work(struct work_struct *work)
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
-       cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3123,11 +3202,25 @@ int transport_generic_handle_tmr(
        struct se_cmd *cmd)
 {
        unsigned long flags;
+       bool aborted = false;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       cmd->transport_state |= CMD_T_ACTIVE;
+       if (cmd->transport_state & CMD_T_ABORTED) {
+               aborted = true;
+       } else {
+               cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+               cmd->transport_state |= CMD_T_ACTIVE;
+       }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+       if (aborted) {
+               pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+                       " ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+                       cmd->se_tmr_req->ref_task_tag, cmd->tag);
+               transport_cmd_check_stop_to_fabric(cmd);
+               return 0;
+       }
+
        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
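
The CMD_T_ABORTED check above closes a race where the TMR's se_cmd is aborted before its work is queued. An illustrative timeline (hedged; the exact abort source varies, e.g. session shutdown aborting outstanding commands):

/*
 *   CPU 0                                    CPU 1
 *   transport_generic_handle_tmr()
 *                                            abort path sets CMD_T_ABORTED
 *                                            on the TMR's se_cmd under
 *                                            t_state_lock
 *   takes t_state_lock, observes
 *   CMD_T_ABORTED, never queues
 *   target_tmr_work(), and acks via
 *   transport_cmd_check_stop_to_fabric()
 */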