target: Follow up core updates from AGrover and HCH (round 4)
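
Changes visible in this excerpt: the file-local DEBUG_* macro families are dropped in favour of pr_debug(), bare printk(KERN_ERR/KERN_WARNING/KERN_INFO) calls become pr_err()/pr_warn()/pr_debug(), redundant parentheses are removed from conditionals, task->task_sg_num is renamed to task->task_sg_nents, and WRITE_SAME with a zero sector count is now rejected (goto out_invalid_cdb_field) instead of silently substituting the block size. One practical effect: messages that previously always printed at KERN_INFO (session registration, sense reporting, QUEUE_FULL handling) now go through pr_debug() and are only emitted when DEBUG or CONFIG_DYNAMIC_DEBUG is enabled for this file. As a minimal sketch of why the conversions are one-for-one (not part of this patch; assuming the stock helpers in include/linux/printk.h), the pr_*() wrappers expand roughly as follows:

    /* Simplified sketch of the generic printk helpers -- not part of this patch. */
    #ifndef pr_fmt
    #define pr_fmt(fmt) fmt                 /* a file may define its own prefix */
    #endif

    #define pr_err(fmt, ...)   printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_warn(fmt, ...)  printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

    #ifdef DEBUG
    #define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
    #else
    /* with CONFIG_DYNAMIC_DEBUG this becomes dynamic_pr_debug() instead */
    #define pr_debug(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
    #endif

This is also why the per-message compile-time switches removed at the top of the file (DEBUG_CDB_HANDLER and friends) are no longer needed: with dynamic debug the same messages can be enabled per call site at runtime.
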
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c743d94baf775c342e77326a79d0053704c2302a..55b6588904a4a8cd8aa039a65e8001c32649a310 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
 #include "target_core_scdb.h"
 #include "target_core_ua.h"
 
-/* #define DEBUG_CDB_HANDLER */
-#ifdef DEBUG_CDB_HANDLER
-#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CDB_H(x...)
-#endif
-
-/* #define DEBUG_CMD_MAP */
-#ifdef DEBUG_CMD_MAP
-#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CMD_M(x...)
-#endif
-
-/* #define DEBUG_MEM_ALLOC */
-#ifdef DEBUG_MEM_ALLOC
-#define DEBUG_MEM(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM(x...)
-#endif
-
-/* #define DEBUG_MEM2_ALLOC */
-#ifdef DEBUG_MEM2_ALLOC
-#define DEBUG_MEM2(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM2(x...)
-#endif
-
-/* #define DEBUG_SG_CALC */
-#ifdef DEBUG_SG_CALC
-#define DEBUG_SC(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SC(x...)
-#endif
-
-/* #define DEBUG_SE_OBJ */
-#ifdef DEBUG_SE_OBJ
-#define DEBUG_SO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SO(x...)
-#endif
-
-/* #define DEBUG_CMD_VOL */
-#ifdef DEBUG_CMD_VOL
-#define DEBUG_VOL(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_VOL(x...)
-#endif
-
-/* #define DEBUG_CMD_STOP */
-#ifdef DEBUG_CMD_STOP
-#define DEBUG_CS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CS(x...)
-#endif
-
-/* #define DEBUG_PASSTHROUGH */
-#ifdef DEBUG_PASSTHROUGH
-#define DEBUG_PT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_PT(x...)
-#endif
-
-/* #define DEBUG_TASK_STOP */
-#ifdef DEBUG_TASK_STOP
-#define DEBUG_TS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TS(x...)
-#endif
-
-/* #define DEBUG_TRANSPORT_STOP */
-#ifdef DEBUG_TRANSPORT_STOP
-#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TRANSPORT_S(x...)
-#endif
-
-/* #define DEBUG_TASK_FAILURE */
-#ifdef DEBUG_TASK_FAILURE
-#define DEBUG_TF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TF(x...)
-#endif
-
-/* #define DEBUG_DEV_OFFLINE */
-#ifdef DEBUG_DEV_OFFLINE
-#define DEBUG_DO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_DO(x...)
-#endif
-
-/* #define DEBUG_TASK_STATE */
-#ifdef DEBUG_TASK_STATE
-#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TSTATE(x...)
-#endif
-
-/* #define DEBUG_STATUS_THR */
-#ifdef DEBUG_STATUS_THR
-#define DEBUG_ST(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_ST(x...)
-#endif
-
-/* #define DEBUG_TASK_TIMEOUT */
-#ifdef DEBUG_TASK_TIMEOUT
-#define DEBUG_TT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TT(x...)
-#endif
-
-/* #define DEBUG_GENERIC_REQUEST_FAILURE */
-#ifdef DEBUG_GENERIC_REQUEST_FAILURE
-#define DEBUG_GRF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_GRF(x...)
-#endif
-
-/* #define DEBUG_SAM_TASK_ATTRS */
-#ifdef DEBUG_SAM_TASK_ATTRS
-#define DEBUG_STA(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_STA(x...)
-#endif
-
 static int sub_api_initialized;
 
 static struct kmem_cache *se_cmd_cache;
@@ -225,62 +99,62 @@ int init_se_kmem_caches(void)
 {
        se_cmd_cache = kmem_cache_create("se_cmd_cache",
                        sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-       if (!(se_cmd_cache)) {
-               printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+       if (!se_cmd_cache) {
+               pr_err("kmem_cache_create for struct se_cmd failed\n");
                goto out;
        }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
-       if (!(se_tmr_req_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+       if (!se_tmr_req_cache) {
+               pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
                goto out;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
                        0, NULL);
-       if (!(se_sess_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for struct se_session"
+       if (!se_sess_cache) {
+               pr_err("kmem_cache_create() for struct se_session"
                                " failed\n");
                goto out;
        }
        se_ua_cache = kmem_cache_create("se_ua_cache",
                        sizeof(struct se_ua), __alignof__(struct se_ua),
                        0, NULL);
-       if (!(se_ua_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+       if (!se_ua_cache) {
+               pr_err("kmem_cache_create() for struct se_ua failed\n");
                goto out;
        }
        t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
                        sizeof(struct t10_pr_registration),
                        __alignof__(struct t10_pr_registration), 0, NULL);
-       if (!(t10_pr_reg_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+       if (!t10_pr_reg_cache) {
+               pr_err("kmem_cache_create() for struct t10_pr_registration"
                                " failed\n");
                goto out;
        }
        t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
                        sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
                        0, NULL);
-       if (!(t10_alua_lu_gp_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+       if (!t10_alua_lu_gp_cache) {
+               pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
                                " failed\n");
                goto out;
        }
        t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
                        sizeof(struct t10_alua_lu_gp_member),
                        __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
-       if (!(t10_alua_lu_gp_mem_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+       if (!t10_alua_lu_gp_mem_cache) {
+               pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
                                "cache failed\n");
                goto out;
        }
        t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
                        sizeof(struct t10_alua_tg_pt_gp),
                        __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
-       if (!(t10_alua_tg_pt_gp_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+       if (!t10_alua_tg_pt_gp_cache) {
+               pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
                                "cache failed\n");
                goto out;
        }
@@ -289,8 +163,8 @@ int init_se_kmem_caches(void)
                        sizeof(struct t10_alua_tg_pt_gp_member),
                        __alignof__(struct t10_alua_tg_pt_gp_member),
                        0, NULL);
-       if (!(t10_alua_tg_pt_gp_mem_cache)) {
-               printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+       if (!t10_alua_tg_pt_gp_mem_cache) {
+               pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
                                "mem_t failed\n");
                goto out;
        }
@@ -366,19 +240,19 @@ static int transport_subsystem_reqmods(void)
 
        ret = request_module("target_core_iblock");
        if (ret != 0)
-               printk(KERN_ERR "Unable to load target_core_iblock\n");
+               pr_err("Unable to load target_core_iblock\n");
 
        ret = request_module("target_core_file");
        if (ret != 0)
-               printk(KERN_ERR "Unable to load target_core_file\n");
+               pr_err("Unable to load target_core_file\n");
 
        ret = request_module("target_core_pscsi");
        if (ret != 0)
-               printk(KERN_ERR "Unable to load target_core_pscsi\n");
+               pr_err("Unable to load target_core_pscsi\n");
 
        ret = request_module("target_core_stgt");
        if (ret != 0)
-               printk(KERN_ERR "Unable to load target_core_stgt\n");
+               pr_err("Unable to load target_core_stgt\n");
 
        return 0;
 }
@@ -405,8 +279,8 @@ struct se_session *transport_init_session(void)
        struct se_session *se_sess;
 
        se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
-       if (!(se_sess)) {
-               printk(KERN_ERR "Unable to allocate struct se_session from"
+       if (!se_sess) {
+               pr_err("Unable to allocate struct se_session from"
                                " se_sess_cache\n");
                return ERR_PTR(-ENOMEM);
        }
@@ -460,7 +334,7 @@ void __transport_register_session(
        }
        list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 
-       printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+       pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
                se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
 }
 EXPORT_SYMBOL(__transport_register_session);
@@ -485,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
         * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
         */
        se_nacl = se_sess->se_node_acl;
-       if ((se_nacl)) {
+       if (se_nacl) {
                spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
                list_del(&se_sess->sess_acl_list);
                /*
@@ -516,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess)
        struct se_portal_group *se_tpg = se_sess->se_tpg;
        struct se_node_acl *se_nacl;
 
-       if (!(se_tpg)) {
+       if (!se_tpg) {
                transport_free_session(se_sess);
                return;
        }
@@ -532,11 +406,11 @@ void transport_deregister_session(struct se_session *se_sess)
         * struct se_node_acl if it had been previously dynamically generated.
         */
        se_nacl = se_sess->se_node_acl;
-       if ((se_nacl)) {
+       if (se_nacl) {
                spin_lock_bh(&se_tpg->acl_node_lock);
                if (se_nacl->dynamic_node_acl) {
-                       if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
-                                       se_tpg))) {
+                       if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
+                                       se_tpg)) {
                                list_del(&se_nacl->acl_list);
                                se_tpg->num_node_acls--;
                                spin_unlock_bh(&se_tpg->acl_node_lock);
@@ -553,7 +427,7 @@ void transport_deregister_session(struct se_session *se_sess)
 
        transport_free_session(se_sess);
 
-       printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+       pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
 }
 EXPORT_SYMBOL(transport_deregister_session);
@@ -569,19 +443,19 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                dev = task->se_dev;
-               if (!(dev))
+               if (!dev)
                        continue;
 
                if (atomic_read(&task->task_active))
                        continue;
 
-               if (!(atomic_read(&task->task_state_active)))
+               if (!atomic_read(&task->task_state_active))
                        continue;
 
                spin_lock_irqsave(&dev->execute_task_lock, flags);
                list_del(&task->t_state_list);
-               DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
-                       cmd->se_tfo->tfo_get_task_tag(cmd), dev, task);
+               pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+                       cmd->se_tfo->get_task_tag(cmd), dev, task);
                spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
                atomic_set(&task->task_state_active, 0);
@@ -610,7 +484,7 @@ static int transport_cmd_check_stop(
         * command for LUN shutdown purposes.
         */
        if (atomic_read(&cmd->transport_lun_stop)) {
-               DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)"
+               pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
                        " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));
 
@@ -629,7 +503,7 @@ static int transport_cmd_check_stop(
         * this command for frontend exceptions.
         */
        if (atomic_read(&cmd->t_transport_stop)) {
-               DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) =="
+               pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
                        " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));
 
@@ -695,7 +569,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
                return;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!(atomic_read(&cmd->transport_dev_active))) {
+       if (!atomic_read(&cmd->transport_dev_active)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_lun;
        }
@@ -710,7 +584,7 @@ check_lun:
                list_del(&cmd->se_lun_node);
                atomic_set(&cmd->transport_lun_active, 0);
 #if 0
-               printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
+               pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
                        cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
 #endif
        }
@@ -797,7 +671,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
        unsigned long flags;
 
        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (!(atomic_read(&cmd->t_transport_queue_active))) {
+       if (!atomic_read(&cmd->t_transport_queue_active)) {
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return;
        }
@@ -812,7 +686,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
        if (atomic_read(&cmd->t_transport_queue_active)) {
-               printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+               pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
                        cmd->se_tfo->get_task_tag(cmd),
                        atomic_read(&cmd->t_transport_queue_active));
        }
@@ -853,7 +727,7 @@ void transport_complete_task(struct se_task *task, int success)
        int t_state;
        unsigned long flags;
 #if 0
-       printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+       pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
                        cmd->t_task_cdb[0], dev);
 #endif
        if (dev)
@@ -899,8 +773,8 @@ void transport_complete_task(struct se_task *task, int success)
         * the processing thread.
         */
        if (atomic_read(&task->task_timeout)) {
-               if (!(atomic_dec_and_test(
-                               &cmd->t_task_cdbs_timeout_left))) {
+               if (!atomic_dec_and_test(
+                               &cmd->t_task_cdbs_timeout_left)) {
                        spin_unlock_irqrestore(&cmd->t_state_lock,
                                flags);
                        return;
@@ -918,7 +792,7 @@ void transport_complete_task(struct se_task *task, int success)
         * struct se_task from struct se_cmd will complete itself into the
         * device queue depending upon int success.
         */
-       if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
+       if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
                if (!success)
                        cmd->t_tasks_failed = 1;
 
@@ -976,9 +850,9 @@ static inline int transport_add_task_check_sam_attr(
                                &task_prev->t_execute_list :
                                &dev->execute_task_list);
 
-               DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+               pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
                                " in execution queue\n",
-                               T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+                               task->task_se_cmd->t_task_cdb[0]);
                return 1;
        }
        /*
@@ -1020,7 +894,7 @@ static void __transport_add_task_to_execute_queue(
 
        atomic_set(&task->task_state_active, 1);
 
-       DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+       pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
                task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
                task, dev);
 }
@@ -1042,8 +916,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
                list_add_tail(&task->t_state_list, &dev->state_task_list);
                atomic_set(&task->task_state_active, 1);
 
-               DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
-                       task->se_cmd->se_tfo->get_task_tag(
+               pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+                       task->task_se_cmd->se_tfo->get_task_tag(
                        task->task_se_cmd), task, dev);
 
                spin_unlock(&dev->execute_task_lock);
@@ -1112,7 +986,7 @@ static void target_qf_do_work(struct work_struct *work)
                smp_mb__after_atomic_dec();
                spin_unlock_irq(&dev->qf_cmd_lock);
 
-               printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue"
+               pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
                        (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
                        (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
@@ -1197,7 +1071,7 @@ static void transport_release_all_cmds(struct se_device *dev)
                spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
                                flags);
 
-               printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+               pr_err("Releasing ITT: 0x%08x, i_state: %u,"
                        " t_state: %u directly\n",
                        cmd->se_tfo->get_task_tag(cmd),
                        cmd->se_tfo->get_cmd_state(cmd), t_state);
@@ -1264,7 +1138,7 @@ void transport_dump_vpd_proto_id(
        if (p_buf)
                strncpy(p_buf, buf, p_buf_len);
        else
-               printk(KERN_INFO "%s", buf);
+               pr_debug("%s", buf);
 }
 
 void
@@ -1314,7 +1188,7 @@ int transport_dump_vpd_assoc(
        if (p_buf)
                strncpy(p_buf, buf, p_buf_len);
        else
-               printk("%s", buf);
+               pr_debug("%s", buf);
 
        return ret;
 }
@@ -1374,7 +1248,7 @@ int transport_dump_vpd_ident_type(
                        return -EINVAL;
                strncpy(p_buf, buf, p_buf_len);
        } else {
-               printk("%s", buf);
+               pr_debug("%s", buf);
        }
 
        return ret;
@@ -1425,7 +1299,7 @@ int transport_dump_vpd_ident(
        if (p_buf)
                strncpy(p_buf, buf, p_buf_len);
        else
-               printk("%s", buf);
+               pr_debug("%s", buf);
 
        return ret;
 }
@@ -1482,7 +1356,7 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
        }
 
        dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
-       DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+       pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
                " device\n", dev->transport->name,
                dev->transport->get_device_rev(dev));
 }
@@ -1494,32 +1368,32 @@ static void scsi_dump_inquiry(struct se_device *dev)
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
-       printk("  Vendor: ");
+       pr_debug("  Vendor: ");
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
-                       printk("%c", wwn->vendor[i]);
+                       pr_debug("%c", wwn->vendor[i]);
                else
-                       printk(" ");
+                       pr_debug(" ");
 
-       printk("  Model: ");
+       pr_debug("  Model: ");
        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
-                       printk("%c", wwn->model[i]);
+                       pr_debug("%c", wwn->model[i]);
                else
-                       printk(" ");
+                       pr_debug(" ");
 
-       printk("  Revision: ");
+       pr_debug("  Revision: ");
        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
-                       printk("%c", wwn->revision[i]);
+                       pr_debug("%c", wwn->revision[i]);
                else
-                       printk(" ");
+                       pr_debug(" ");
 
-       printk("\n");
+       pr_debug("\n");
 
        device_type = dev->transport->get_device_type(dev);
-       printk("  Type:   %s ", scsi_device_type(device_type));
-       printk("                 ANSI SCSI revision: %02x\n",
+       pr_debug("  Type:   %s ", scsi_device_type(device_type));
+       pr_debug("                 ANSI SCSI revision: %02x\n",
                                dev->transport->get_device_rev(dev));
 }
 
@@ -1537,8 +1411,8 @@ struct se_device *transport_add_device_to_core_hba(
        struct se_device  *dev;
 
        dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
-       if (!(dev)) {
-               printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+       if (!dev) {
+               pr_err("Unable to allocate memory for se_dev_t\n");
                return NULL;
        }
 
@@ -1608,7 +1482,7 @@ struct se_device *transport_add_device_to_core_hba(
        dev->process_thread = kthread_run(transport_processing_thread, dev,
                                          "LIO_%s", dev->transport->name);
        if (IS_ERR(dev->process_thread)) {
-               printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+               pr_err("Unable to create kthread: LIO_%s\n",
                        dev->transport->name);
                goto out;
        }
@@ -1626,7 +1500,7 @@ struct se_device *transport_add_device_to_core_hba(
         */
        if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (!inquiry_prod || !inquiry_rev) {
-                       printk(KERN_ERR "All non TCM/pSCSI plugins require"
+                       pr_err("All non TCM/pSCSI plugins require"
                                " INQUIRY consts\n");
                        goto out;
                }
@@ -1688,9 +1562,9 @@ transport_generic_get_task(struct se_cmd *cmd,
        struct se_task *task;
        struct se_device *dev = cmd->se_dev;
 
-       task = dev->transport->alloc_task(cmd);
+       task = dev->transport->alloc_task(cmd->t_task_cdb);
        if (!task) {
-               printk(KERN_ERR "Unable to allocate struct se_task\n");
+               pr_err("Unable to allocate struct se_task\n");
                return NULL;
        }
 
@@ -1751,7 +1625,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
                return 0;
 
        if (cmd->sam_task_attr == MSG_ACA_TAG) {
-               DEBUG_STA("SAM Task Attribute ACA"
+               pr_debug("SAM Task Attribute ACA"
                        " emulation is not supported\n");
                return -EINVAL;
        }
@@ -1761,9 +1635,9 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
         */
        cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
        smp_mb__after_atomic_inc();
-       DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+       pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
                        cmd->se_ordered_id, cmd->sam_task_attr,
-                       TRANSPORT(cmd->se_dev)->name);
+                       cmd->se_dev->transport->name);
        return 0;
 }
 
@@ -1804,7 +1678,7 @@ int transport_generic_allocate_tasks(
         * for VARIABLE_LENGTH_CMD
         */
        if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
-               printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+               pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
                return -EINVAL;
@@ -1817,8 +1691,8 @@ int transport_generic_allocate_tasks(
        if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
                cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
                                                GFP_KERNEL);
-               if (!(cmd->t_task_cdb)) {
-                       printk(KERN_ERR "Unable to allocate cmd->t_task_cdb"
+               if (!cmd->t_task_cdb) {
+                       pr_err("Unable to allocate cmd->t_task_cdb"
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
@@ -1864,7 +1738,7 @@ int transport_generic_handle_cdb(
 {
        if (!cmd->se_lun) {
                dump_stack();
-               printk(KERN_ERR "cmd->se_lun is NULL\n");
+               pr_err("cmd->se_lun is NULL\n");
                return -EINVAL;
        }
 
@@ -1882,12 +1756,12 @@ int transport_handle_cdb_direct(
 {
        if (!cmd->se_lun) {
                dump_stack();
-               printk(KERN_ERR "cmd->se_lun is NULL\n");
+               pr_err("cmd->se_lun is NULL\n");
                return -EINVAL;
        }
        if (in_interrupt()) {
                dump_stack();
-               printk(KERN_ERR "transport_generic_handle_cdb cannot be called"
+               pr_err("transport_generic_handle_cdb cannot be called"
                                " from interrupt context\n");
                return -EINVAL;
        }
@@ -1906,7 +1780,7 @@ int transport_generic_handle_cdb_map(
 {
        if (!cmd->se_lun) {
                dump_stack();
-               printk(KERN_ERR "cmd->se_lun is NULL\n");
+               pr_err("cmd->se_lun is NULL\n");
                return -EINVAL;
        }
 
@@ -1975,7 +1849,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
        unsigned long flags;
        int ret = 0;
 
-       DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+       pr_debug("ITT[0x%08x] - Stopping tasks\n",
                cmd->se_tfo->get_task_tag(cmd));
 
        /*
@@ -1984,7 +1858,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        list_for_each_entry_safe(task, task_tmp,
                                &cmd->t_task_list, t_list) {
-               DEBUG_TS("task_no[%d] - Processing task %p\n",
+               pr_debug("task_no[%d] - Processing task %p\n",
                                task->task_no, task);
                /*
                 * If the struct se_task has not been sent and is not active,
@@ -1997,7 +1871,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
                        transport_remove_task_from_execute_queue(task,
                                        task->se_dev);
 
-                       DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+                       pr_debug("task_no[%d] - Removed from execute queue\n",
                                task->task_no);
                        spin_lock_irqsave(&cmd->t_state_lock, flags);
                        continue;
@@ -2012,10 +1886,10 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
                        spin_unlock_irqrestore(&cmd->t_state_lock,
                                        flags);
 
-                       DEBUG_TS("task_no[%d] - Waiting to complete\n",
+                       pr_debug("task_no[%d] - Waiting to complete\n",
                                task->task_no);
                        wait_for_completion(&task->task_stop_comp);
-                       DEBUG_TS("task_no[%d] - Stopped successfully\n",
+                       pr_debug("task_no[%d] - Stopped successfully\n",
                                task->task_no);
 
                        spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -2024,7 +1898,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
                        atomic_set(&task->task_active, 0);
                        atomic_set(&task->task_stop, 0);
                } else {
-                       DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+                       pr_debug("task_no[%d] - Did nothing\n", task->task_no);
                        ret++;
                }
 
@@ -2046,18 +1920,18 @@ static void transport_generic_request_failure(
 {
        int ret = 0;
 
-       DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+       pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+       pr_debug("-----[ i_state: %d t_state/def_t_state:"
                " %d/%d transport_error_status: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
                cmd->t_state, cmd->deferred_t_state,
                cmd->transport_error_status);
-       DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+       pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
                " t_transport_active: %d t_transport_stop: %d"
-               " t_transport_sent: %d\n", cmd->t_task_cdbs,
+               " t_transport_sent: %d\n", cmd->t_task_list_num,
                atomic_read(&cmd->t_task_cdbs_left),
                atomic_read(&cmd->t_task_cdbs_sent),
                atomic_read(&cmd->t_task_cdbs_ex_left),
@@ -2146,7 +2020,7 @@ static void transport_generic_request_failure(
                 */
                break;
        default:
-               printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+               pr_err("Unknown transport error for CDB 0x%02x: %d\n",
                        cmd->t_task_cdb[0],
                        cmd->transport_error_status);
                cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -2164,7 +2038,7 @@ static void transport_generic_request_failure(
 
 check_stop:
        transport_lun_remove_cmd(cmd);
-       if (!(transport_cmd_check_stop_to_fabric(cmd)))
+       if (!transport_cmd_check_stop_to_fabric(cmd))
                ;
        return;
 
@@ -2178,7 +2052,7 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!(atomic_read(&cmd->t_transport_timeout))) {
+       if (!atomic_read(&cmd->t_transport_timeout)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
@@ -2262,7 +2136,7 @@ static void transport_task_timeout_handler(unsigned long data)
        struct se_cmd *cmd = task->task_se_cmd;
        unsigned long flags;
 
-       DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+       pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (task->task_flags & TF_STOP) {
@@ -2274,8 +2148,8 @@ static void transport_task_timeout_handler(unsigned long data)
        /*
         * Determine if transport_complete_task() has already been called.
         */
-       if (!(atomic_read(&task->task_active))) {
-               DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+       if (!atomic_read(&task->task_active)) {
+               pr_debug("transport task: %p cmd: %p timeout task_active"
                                " == 0\n", task, cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
@@ -2290,20 +2164,20 @@ static void transport_task_timeout_handler(unsigned long data)
        task->task_scsi_status = 1;
 
        if (atomic_read(&task->task_stop)) {
-               DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+               pr_debug("transport task: %p cmd: %p timeout task_stop"
                                " == 1\n", task, cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                complete(&task->task_stop_comp);
                return;
        }
 
-       if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
-               DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+       if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+               pr_debug("transport task: %p cmd: %p timeout non zero"
                                " t_task_cdbs_left\n", task, cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
-       DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+       pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
                        task, cmd);
 
        cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
@@ -2326,7 +2200,7 @@ static void transport_start_task_timer(struct se_task *task)
         * If the task_timeout is disabled, exit now.
         */
        timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
-       if (!(timeout))
+       if (!timeout)
                return;
 
        init_timer(&task->task_timer);
@@ -2337,7 +2211,7 @@ static void transport_start_task_timer(struct se_task *task)
        task->task_flags |= TF_RUNNING;
        add_timer(&task->task_timer);
 #if 0
-       printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+       pr_debug("Starting task timer for cmd: %p task: %p seconds:"
                " %d\n", task->task_se_cmd, task, timeout);
 #endif
 }
@@ -2349,7 +2223,7 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 {
        struct se_cmd *cmd = task->task_se_cmd;
 
        if (!(task->task_flags & TF_RUNNING))
                return;
 
        task->task_flags |= TF_STOP;
@@ -2404,9 +2278,9 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
                atomic_inc(&cmd->se_dev->dev_hoq_count);
                smp_mb__after_atomic_inc();
-               DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+               pr_debug("Added HEAD_OF_QUEUE for CDB:"
                        " 0x%02x, se_ordered_id: %u\n",
-                       cmd->_task_cdb[0],
+                       cmd->t_task_cdb[0],
                        cmd->se_ordered_id);
                return 1;
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -2418,7 +2292,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
                atomic_inc(&cmd->se_dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
 
-               DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+               pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
                                " list, se_ordered_id: %u\n",
                                cmd->t_task_cdb[0],
                                cmd->se_ordered_id);
@@ -2427,7 +2301,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
                 * no other older commands exist that need to be
                 * completed first.
                 */
-               if (!(atomic_read(&cmd->se_dev->simple_cmds)))
+               if (!atomic_read(&cmd->se_dev->simple_cmds))
                        return 1;
        } else {
                /*
@@ -2452,7 +2326,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
                                &cmd->se_dev->delayed_cmd_list);
                spin_unlock(&cmd->se_dev->delayed_cmd_lock);
 
-               DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+               pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
                        " delayed CMD list, se_ordered_id: %u\n",
                        cmd->t_task_cdb[0], cmd->sam_task_attr,
                        cmd->se_ordered_id);
@@ -2486,7 +2360,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
         * Call transport_cmd_check_stop() to see if a fabric exception
         * has occurred that prevents execution.
         */
-       if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+       if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
                /*
                 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
                 * attribute for the tasks of the received struct se_cmd CDB
@@ -2777,7 +2651,7 @@ static inline u32 transport_get_size(
                        return sectors;
        }
 #if 0
-       printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+       pr_debug("Returning block_size: %u, sectors: %u == %u for"
                        " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
                        dev->se_sub_dev->se_dev_attrib.block_size * sectors,
                        dev->transport->name);
@@ -2832,8 +2706,8 @@ static void transport_xor_callback(struct se_cmd *cmd)
         * 5) transfer the resulting XOR data to the data-in buffer.
         */
        buf = kmalloc(cmd->data_length, GFP_KERNEL);
-       if (!(buf)) {
-               printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+       if (!buf) {
+               pr_err("Unable to allocate xor_callback buf\n");
                return;
        }
        /*
@@ -2893,18 +2767,18 @@ static int transport_get_sense_data(struct se_cmd *cmd)
                        continue;
 
                dev = task->se_dev;
-               if (!(dev))
+               if (!dev)
                        continue;
 
                if (!dev->transport->get_sense_buffer) {
-                       printk(KERN_ERR "dev->transport->get_sense_buffer"
+                       pr_err("dev->transport->get_sense_buffer"
                                        " is NULL\n");
                        continue;
                }
 
                sense_buffer = dev->transport->get_sense_buffer(task);
-               if (!(sense_buffer)) {
-                       printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+               if (!sense_buffer) {
+                       pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
                                " sense buffer for task with sense\n",
                                cmd->se_tfo->get_task_tag(cmd), task->task_no);
                        continue;
@@ -2921,7 +2795,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
                cmd->scsi_sense_length =
                                (TRANSPORT_SENSE_BUFFER + offset);
 
-               printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+               pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
                                " and sense\n",
                        dev->se_hba->hba_id, dev->transport->name,
                                cmd->scsi_status);
@@ -2969,13 +2843,12 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
 
        sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
 
-       if ((cmd->t_task_lba + sectors) >
-            transport_dev_end_lba(dev)) {
-               printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+       if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
+               pr_err("LBA: %llu Sectors: %u exceeds"
                        " transport_dev_end_lba(): %llu\n",
                        cmd->t_task_lba, sectors,
                        transport_dev_end_lba(dev));
-               printk(KERN_ERR "  We should return CHECK_CONDITION"
+               pr_err("  We should return CHECK_CONDITION"
                       " but we don't yet\n");
                return 0;
        }
@@ -3026,7 +2899,7 @@ static int transport_generic_cmd_sequencer(
                 */
                if (ret > 0) {
 #if 0
-                       printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+                       pr_debug("[%s]: ALUA TG Port not available,"
                                " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
                                cmd->se_tfo->get_fabric_name(), alua_ascq);
 #endif
@@ -3192,10 +3065,13 @@ static int transport_generic_cmd_sequencer(
                        if (sector_ret)
                                goto out_unsupported_cdb;
 
-                       if (sectors != 0)
+                       if (sectors)
                                size = transport_get_size(sectors, cdb, cmd);
-                       else
-                               size = dev->se_sub_dev->se_dev_attrib.block_size;
+                       else {
+                               pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+                                      " supported\n");
+                               goto out_invalid_cdb_field;
+                       }
 
                        cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
                        cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
@@ -3207,7 +3083,7 @@ static int transport_generic_cmd_sequencer(
                                break;
 
                        if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
-                               printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+                               pr_err("WRITE_SAME PBDATA and LBDATA"
                                        " bits not supported for Block Discard"
                                        " Emulation\n");
                                goto out_invalid_cdb_field;
@@ -3217,13 +3093,13 @@ static int transport_generic_cmd_sequencer(
                         * tpws with the UNMAP=1 bit set.
                         */
                        if (!(cdb[10] & 0x08)) {
-                               printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+                               pr_err("WRITE_SAME w/o UNMAP bit not"
                                        " supported for Block Discard Emulation\n");
                                goto out_invalid_cdb_field;
                        }
                        break;
                default:
-                       printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+                       pr_err("VARIABLE_LENGTH_CMD service action"
                                " 0x%04x not supported\n", service_action);
                        goto out_unsupported_cdb;
                }
@@ -3469,10 +3345,12 @@ static int transport_generic_cmd_sequencer(
                if (sector_ret)
                        goto out_unsupported_cdb;
 
-               if (sectors != 0)
+               if (sectors)
                        size = transport_get_size(sectors, cdb, cmd);
-               else
-                       size = dev->se_sub_dev->se_dev_attrib.block_size;
+               else {
+                       pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+                       goto out_invalid_cdb_field;
+               }
 
                cmd->t_task_lba = get_unaligned_be16(&cdb[2]);
                passthrough = (dev->transport->transport_type ==
@@ -3484,9 +3362,9 @@ static int transport_generic_cmd_sequencer(
                 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and
                 * TCM/FILEIO subsystem plugin backstores.
                 */
-               if (!(passthrough)) {
+               if (!passthrough) {
                        if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
-                               printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+                               pr_err("WRITE_SAME PBDATA and LBDATA"
                                        " bits not supported for Block Discard"
                                        " Emulation\n");
                                goto out_invalid_cdb_field;
@@ -3496,7 +3374,7 @@ static int transport_generic_cmd_sequencer(
                         * tpws with the UNMAP=1 bit set.
                         */
                        if (!(cdb[1] & 0x08)) {
-                               printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+                               pr_err("WRITE_SAME w/o UNMAP bit not "
                                        " supported for Block Discard Emulation\n");
                                goto out_invalid_cdb_field;
                        }
@@ -3532,7 +3410,7 @@ static int transport_generic_cmd_sequencer(
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
                break;
        default:
-               printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+               pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
                        " 0x%02x, sending CHECK_CONDITION.\n",
                        cmd->se_tfo->get_fabric_name(), cdb[0]);
                cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
@@ -3540,7 +3418,7 @@ static int transport_generic_cmd_sequencer(
        }
 
        if (size != cmd->data_length) {
-               printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+               pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
                        " %u does not match SCSI CDB Length: %u for SAM Opcode:"
                        " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
                                cmd->data_length, size, cdb[0]);
@@ -3548,7 +3426,7 @@ static int transport_generic_cmd_sequencer(
                cmd->cmd_spdtl = size;
 
                if (cmd->data_direction == DMA_TO_DEVICE) {
-                       printk(KERN_ERR "Rejecting underflow/overflow"
+                       pr_err("Rejecting underflow/overflow"
                                        " WRITE data\n");
                        goto out_invalid_cdb_field;
                }
@@ -3556,8 +3434,8 @@ static int transport_generic_cmd_sequencer(
                 * Reject READ_* or WRITE_* with overflow/underflow for
                 * type SCF_SCSI_DATA_SG_IO_CDB.
                 */
-               if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
-                       printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+               if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
+                       pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
                                " CDB on non 512-byte sector setup subsystem"
                                " plugin: %s\n", dev->transport->name);
                        /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
@@ -3607,14 +3485,14 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                atomic_dec(&dev->simple_cmds);
                smp_mb__after_atomic_dec();
                dev->dev_cur_ordered_id++;
-               DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+               pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
                atomic_dec(&dev->dev_hoq_count);
                smp_mb__after_atomic_dec();
                dev->dev_cur_ordered_id++;
-               DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+               pr_debug("Incremented dev_cur_ordered_id: %u for"
                        " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -3625,7 +3503,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                spin_unlock(&dev->ordered_cmd_lock);
 
                dev->dev_cur_ordered_id++;
-               DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+               pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
                        " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
        }
        /*
@@ -3640,10 +3518,10 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                list_del(&cmd_p->se_delayed_node);
                spin_unlock(&dev->delayed_cmd_lock);
 
-               DEBUG_STA("Calling add_tasks() for"
+               pr_debug("Calling add_tasks() for"
                        " cmd_p: 0x%02x Task Attr: 0x%02x"
                        " Dormant -> Active, se_ordered_id: %u\n",
-                       T_TASK(cmd_p)->t_task_cdb[0],
+                       cmd_p->t_task_cdb[0],
                        cmd_p->sam_task_attr, cmd_p->se_ordered_id);
 
                transport_add_tasks_from_cmd(cmd_p);
@@ -3812,7 +3690,7 @@ done:
        return;
 
 queue_full:
-       printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+       pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
                " data_direction: %d\n", cmd, cmd->data_direction);
        transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
 }
@@ -3837,49 +3715,34 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
                if (task->se_dev)
                        task->se_dev->transport->free_task(task);
                else
-                       printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+                       pr_err("task[%u] - task->se_dev is NULL\n",
                                task->task_no);
                spin_lock_irqsave(&cmd->t_state_lock, flags);
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static inline void transport_free_pages(struct se_cmd *cmd)
+static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
 {
        struct scatterlist *sg;
-       int free_page = 1;
        int count;
 
-       if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
-               free_page = 0;
-       if (cmd->se_dev->transport->do_se_mem_map)
-               free_page = 0;
+       for_each_sg(sgl, sg, nents, count)
+               __free_page(sg_page(sg));
 
-       for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) {
-               /*
-                * Only called if
-                * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
-                */
-               if (free_page)
-                       __free_page(sg_page(sg));
+       kfree(sgl);
+}
 
-       }
-       if (free_page)
-               kfree(cmd->t_data_sg);
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+       if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+               return;
+
+       transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
        cmd->t_data_sg = NULL;
        cmd->t_data_nents = 0;
 
-       for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
-               /*
-                * Only called if
-                * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
-                */
-               if (free_page)
-                       __free_page(sg_page(sg));
-
-       }
-       if (free_page)
-               kfree(cmd->t_bidi_data_sg);
+       transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
        cmd->t_bidi_data_sg = NULL;
        cmd->t_bidi_data_nents = 0;
 }
@@ -3895,7 +3758,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (atomic_read(&cmd->t_fe_count)) {
-               if (!(atomic_dec_and_test(&cmd->t_fe_count))) {
+               if (!atomic_dec_and_test(&cmd->t_fe_count)) {
                        spin_unlock_irqrestore(&cmd->t_state_lock,
                                        flags);
                        return 1;
@@ -3903,7 +3766,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
        }
 
        if (atomic_read(&cmd->t_se_count)) {
-               if (!(atomic_dec_and_test(&cmd->t_se_count))) {
+               if (!atomic_dec_and_test(&cmd->t_se_count)) {
                        spin_unlock_irqrestore(&cmd->t_state_lock,
                                        flags);
                        return 1;
@@ -3922,7 +3785,7 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
                return;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!(atomic_read(&cmd->transport_dev_active))) {
+       if (!atomic_read(&cmd->transport_dev_active)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto free_pages;
        }
@@ -3953,7 +3816,7 @@ transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
        }
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!(atomic_read(&cmd->transport_dev_active))) {
+       if (!atomic_read(&cmd->transport_dev_active)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto free_pages;
        }
@@ -4027,7 +3890,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
                                              DMA_FROM_DEVICE,
                                              cmd->t_bidi_data_sg,
                                              cmd->t_bidi_data_nents);
-               if (!rc) {
+               if (rc <= 0) {
                        cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        cmd->scsi_sense_reason =
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -4046,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
                                             cmd->data_direction,
                                             cmd->t_data_sg,
                                             cmd->t_data_nents);
-       if (!task_cdbs) {
+       if (task_cdbs <= 0) {
                cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                cmd->scsi_sense_reason =
                        TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -4094,12 +3957,6 @@ transport_generic_get_mem(struct se_cmd *cmd)
        struct page *page;
        int i = 0;
 
-       /*
-        * If the device uses memory mapping this is enough.
-        */
-       if (cmd->se_dev->transport->do_se_mem_map)
-               return 0;
-
        nents = DIV_ROUND_UP(length, PAGE_SIZE);
        cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
        if (!cmd->t_data_sg)
@@ -4176,14 +4033,14 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 
                if (!sg_first) {
                        sg_first = task->task_sg;
-                       chained_nents = task->task_sg_num;
+                       chained_nents = task->task_sg_nents;
                } else {
                        sg_chain(sg_prev, sg_prev_nents, task->task_sg);
-                       chained_nents += task->task_sg_num;
+                       chained_nents += task->task_sg_nents;
                }
 
                sg_prev = task->task_sg;
-               sg_prev_nents = task->task_sg_num;
+               sg_prev_nents = task->task_sg_nents;
        }
        /*
         * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4192,19 +4049,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
        cmd->t_tasks_sg_chained = sg_first;
        cmd->t_tasks_sg_chained_no = chained_nents;
 
-       DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
+       pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
                " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
                cmd->t_tasks_sg_chained_no);
 
        for_each_sg(cmd->t_tasks_sg_chained, sg,
                        cmd->t_tasks_sg_chained_no, i) {
 
-               DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
+               pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
                        i, sg, sg_page(sg), sg->length, sg->offset);
                if (sg_is_chain(sg))
-                       DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+                       pr_debug("SG: %p sg_is_chain=1\n", sg);
                if (sg_is_last(sg))
-                       DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+                       pr_debug("SG: %p sg_is_last=1\n", sg);
        }
 }
 EXPORT_SYMBOL(transport_do_task_sg_chain);
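
transport_do_task_sg_chain() above links each task's private table into one logical scatterlist via sg_chain(), and the task_sg_num to task_sg_nents rename keeps the count's name consistent with the rest of the sg API. A self-contained sketch of the chaining mechanics, with illustrative names rather than the driver's own: the first table must be sized with one spare slot, which is exactly what task_padded_sg accounts for.

#include <linux/printk.h>
#include <linux/scatterlist.h>

/*
 * Link table 'a' (a_nents data entries, allocated and sg_init_table()'d
 * with a_nents + 1 slots) to table 'b' (b_nents data entries), then walk
 * the combined list.  sg_chain() turns the spare slot of 'a' into a
 * chain entry, and for_each_sg()/sg_next() skip chain entries.
 */
static void chain_and_walk(struct scatterlist *a, unsigned int a_nents,
			   struct scatterlist *b, unsigned int b_nents)
{
	struct scatterlist *sg;
	int i;

	sg_chain(a, a_nents + 1, b);

	for_each_sg(a, sg, a_nents + b_nents, i)
		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
			 i, sg, sg_page(sg), sg->length, sg->offset);
}
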
@@ -4266,25 +4123,25 @@ static int transport_allocate_data_tasks(
                 * It's so much easier and only a waste when task_count > 1.
                 * That is extremely rare.
                 */
-               task->task_sg_num = sgl_nents;
+               task->task_sg_nents = sgl_nents;
                if (cmd->se_tfo->task_sg_chaining) {
-                       task->task_sg_num++;
+                       task->task_sg_nents++;
                        task->task_padded_sg = 1;
                }
 
                task->task_sg = kmalloc(sizeof(struct scatterlist) * \
-                                       task->task_sg_num, GFP_KERNEL);
+                                       task->task_sg_nents, GFP_KERNEL);
                if (!task->task_sg) {
                        cmd->se_dev->transport->free_task(task);
                        return -ENOMEM;
                }
 
-               sg_init_table(task->task_sg, task->task_sg_num);
+               sg_init_table(task->task_sg, task->task_sg_nents);
 
                task_size = task->task_size;
 
                /* Build new sgl, only up to task_size */
-               for_each_sg(task->task_sg, sg, task->task_sg_num, count) {
+               for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
                        if (cmd_sg->length > task_size)
                                break;
 
@@ -4311,6 +4168,7 @@ transport_allocate_control_task(struct se_cmd *cmd)
        unsigned char *cdb;
        struct se_task *task;
        unsigned long flags;
+       int ret = 0;
 
        task = transport_generic_get_task(cmd, cmd->data_direction);
        if (!task)
@@ -4331,7 +4189,7 @@ transport_allocate_control_task(struct se_cmd *cmd)
        memcpy(task->task_sg, cmd->t_data_sg,
               sizeof(struct scatterlist) * cmd->t_data_nents);
        task->task_size = cmd->data_length;
-       task->task_sg_num = cmd->t_data_nents;
+       task->task_sg_nents = cmd->t_data_nents;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        list_add_tail(&task->t_list, &cmd->t_task_list);
@@ -4339,16 +4197,19 @@ transport_allocate_control_task(struct se_cmd *cmd)
 
        if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
                if (dev->transport->map_task_SG)
-                       return dev->transport->map_task_SG(task);
-               return 0;
+                       ret = dev->transport->map_task_SG(task);
        } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
                if (dev->transport->cdb_none)
-                       return dev->transport->cdb_none(task);
-               return 0;
+                       ret = dev->transport->cdb_none(task);
        } else {
+               pr_err("target: Unknown control cmd type!\n");
                BUG();
-               return -ENOMEM;
        }
+
+       /* Success! Return number of tasks allocated */
+       if (ret == 0)
+               return 1;
+       return ret;
 }
 
 static u32 transport_allocate_tasks(
@@ -4358,18 +4219,12 @@ static u32 transport_allocate_tasks(
        struct scatterlist *sgl,
        unsigned int sgl_nents)
 {
-       int ret;
-
-       if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+       if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
                return transport_allocate_data_tasks(cmd, lba, data_direction,
                                                     sgl, sgl_nents);
-       } else {
-               ret = transport_allocate_control_task(cmd);
-               if (ret < 0)
-                       return ret;
-               else
-                       return 1;
-       }
+       else
+               return transport_allocate_control_task(cmd);
+
 }
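
With the return-1-on-success convention folded into transport_allocate_control_task(), both branches of transport_allocate_tasks() now hand back either a positive task count or an error, which is why the callers earlier in this diff switched to rc <= 0 / task_cdbs <= 0 checks. A sketch of that caller-side contract; the cmd->t_task_lba argument is assumed and the error return is a placeholder, since the real caller's return path is outside the hunks shown here.

/*
 * Caller-side contract sketch (illustrative, mirroring the checks shown
 * earlier in this diff): zero or a negative errno both mean failure, a
 * positive value is the number of tasks allocated.
 */
static int example_setup_tasks(struct se_cmd *cmd)
{
	int task_cdbs;

	task_cdbs = transport_allocate_tasks(cmd, cmd->t_task_lba,
					     cmd->data_direction,
					     cmd->t_data_sg,
					     cmd->t_data_nents);
	if (task_cdbs <= 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason =
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -EINVAL;	/* placeholder; not taken from the patch */
	}
	return task_cdbs;
}
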
 
 
@@ -4441,64 +4296,6 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
  */
 void transport_generic_process_write(struct se_cmd *cmd)
 {
-#if 0
-       /*
-        * Copy SCSI Presented DTL sector(s) from received buffers allocated to
-        * original EDTL
-        */
-       if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
-               if (!cmd->t_tasks_se_num) {
-                       unsigned char *dst, *buf =
-                               (unsigned char *)cmd->t_task_buf;
-
-                       dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
-                       if (!(dst)) {
-                               printk(KERN_ERR "Unable to allocate memory for"
-                                               " WRITE underflow\n");
-                               transport_generic_request_failure(cmd, NULL,
-                                       PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
-                               return;
-                       }
-                       memcpy(dst, buf, cmd->cmd_spdtl);
-
-                       kfree(cmd->t_task_buf);
-                       cmd->t_task_buf = dst;
-               } else {
-                       struct scatterlist *sg =
-                               (struct scatterlist *sg)cmd->t_task_buf;
-                       struct scatterlist *orig_sg;
-
-                       orig_sg = kzalloc(sizeof(struct scatterlist) *
-                                       cmd->t_tasks_se_num,
-                                       GFP_KERNEL))) {
-                       if (!(orig_sg)) {
-                               printk(KERN_ERR "Unable to allocate memory"
-                                               " for WRITE underflow\n");
-                               transport_generic_request_failure(cmd, NULL,
-                                       PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
-                               return;
-                       }
-
-                       memcpy(orig_sg, cmd->t_task_buf,
-                                       sizeof(struct scatterlist) *
-                                       cmd->t_tasks_se_num);
-
-                       cmd->data_length = cmd->cmd_spdtl;
-                       /*
-                        * FIXME, clear out original struct se_task and state
-                        * information.
-                        */
-                       if (transport_generic_new_cmd(cmd) < 0) {
-                               transport_generic_request_failure(cmd, NULL,
-                                       PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
-                               kfree(orig_sg);
-                               return;
-                       }
-
-                       transport_memcpy_write_sg(cmd, orig_sg);
-               }
-       }
-#endif
        transport_execute_tasks(cmd);
 }
 EXPORT_SYMBOL(transport_generic_process_write);
@@ -4554,7 +4351,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
        return PYX_TRANSPORT_WRITE_PENDING;
 
 queue_full:
-       printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+       pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
        cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
        transport_handle_queue_full(cmd, cmd->se_dev,
                        transport_write_pending_qf);
@@ -4586,7 +4383,7 @@ void transport_generic_free_cmd(
 
                if (cmd->se_lun) {
 #if 0
-                       printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
+                       pr_debug("cmd: %p ITT: 0x%08x contains"
                                " cmd->se_lun\n", cmd,
                                cmd->se_tfo->get_task_tag(cmd));
 #endif
@@ -4627,7 +4424,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (atomic_read(&cmd->t_transport_stop)) {
                atomic_set(&cmd->transport_lun_stop, 0);
-               DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+               pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
                        " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                transport_cmd_check_stop(cmd, 1, 0);
@@ -4640,13 +4437,13 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 
        ret = transport_stop_tasks_for_cmd(cmd);
 
-       DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
-                       " %d\n", cmd, cmd->t_task_cdbs, ret);
+       pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
+                       " %d\n", cmd, cmd->t_task_list_num, ret);
        if (!ret) {
-               DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+               pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
                                cmd->se_tfo->get_task_tag(cmd));
                wait_for_completion(&cmd->transport_lun_stop_comp);
-               DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+               pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
                                cmd->se_tfo->get_task_tag(cmd));
        }
        transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
@@ -4654,13 +4451,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
        return 0;
 }
 
-/* #define DEBUG_CLEAR_LUN */
-#ifdef DEBUG_CLEAR_LUN
-#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CLEAR_L(x...)
-#endif
-
 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 {
        struct se_cmd *cmd = NULL;
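
The hunk above retires the file-local DEBUG_CLEAR_LUN wrapper in favour of plain pr_debug(), which disappears at compile time unless DEBUG is defined and, with CONFIG_DYNAMIC_DEBUG, can be enabled per call-site at runtime. A minimal sketch of the replacement pattern; the pr_fmt prefix and example_trace() are illustrative, not part of the patch.

/* Defining pr_fmt() before the include prefixes every pr_*() message. */
#define pr_fmt(fmt) "example: " fmt

#include <linux/printk.h>
#include <linux/types.h>

static void example_trace(u32 unpacked_lun, u32 task_tag)
{
	/*
	 * Compiled out unless DEBUG is set (e.g. ccflags-y += -DDEBUG);
	 * with CONFIG_DYNAMIC_DEBUG each call-site can instead be toggled
	 * through /sys/kernel/debug/dynamic_debug/control at runtime.
	 */
	pr_debug("SE_LUN[%u] - ITT: 0x%08x finished processing\n",
		 unpacked_lun, task_tag);
}
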
@@ -4682,7 +4472,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                 * progress for the iscsi_cmd_t.
                 */
                spin_lock(&cmd->t_state_lock);
-               DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport"
+               pr_debug("SE_LUN[%d] - Setting cmd->transport"
                        "_lun_stop for  ITT: 0x%08x\n",
                        cmd->se_lun->unpacked_lun,
                        cmd->se_tfo->get_task_tag(cmd));
@@ -4691,8 +4481,8 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 
                spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
 
-               if (!(cmd->se_lun)) {
-                       printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+               if (!cmd->se_lun) {
+                       pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
                                cmd->se_tfo->get_task_tag(cmd),
                                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
                        BUG();
@@ -4701,7 +4491,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                 * If the Storage engine still owns the iscsi_cmd_t, determine
                 * and/or stop its context.
                 */
-               DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+               pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
                        "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
                        cmd->se_tfo->get_task_tag(cmd));
 
@@ -4710,13 +4500,13 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                        continue;
                }
 
-               DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+               pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
                        "_wait_for_tasks(): SUCCESS\n",
                        cmd->se_lun->unpacked_lun,
                        cmd->se_tfo->get_task_tag(cmd));
 
                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-               if (!(atomic_read(&cmd->transport_dev_active))) {
+               if (!atomic_read(&cmd->transport_dev_active)) {
                        spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
                        goto check_cond;
                }
@@ -4741,7 +4531,7 @@ check_cond:
                 */
                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
                if (atomic_read(&cmd->transport_lun_fe_stop)) {
-                       DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+                       pr_debug("SE_LUN[%d] - Detected FE stop for"
                                " struct se_cmd: %p ITT: 0x%08x\n",
                                lun->unpacked_lun,
                                cmd, cmd->se_tfo->get_task_tag(cmd));
@@ -4753,7 +4543,7 @@ check_cond:
                        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
                        continue;
                }
-               DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+               pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
                        lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
 
                spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
@@ -4779,7 +4569,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
        kt = kthread_run(transport_clear_lun_thread, lun,
                        "tcm_cl_%u", lun->unpacked_lun);
        if (IS_ERR(kt)) {
-               printk(KERN_ERR "Unable to start clear_lun thread\n");
+               pr_err("Unable to start clear_lun thread\n");
                return PTR_ERR(kt);
        }
        wait_for_completion(&lun->lun_shutdown_comp);
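
transport_clear_lun_from_sessions() above spawns a per-LUN kthread and then blocks on lun->lun_shutdown_comp until that thread has flushed the LUN. The same spawn-and-wait shape in isolation, with placeholder demo_* names rather than the driver's own symbols:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct demo_ctx {
	struct completion done;
};

/* Thread body: do the teardown work, then signal the waiter and exit. */
static int demo_thread(void *arg)
{
	struct demo_ctx *ctx = arg;

	/* ... per-LUN shutdown work would go here ... */
	complete(&ctx->done);
	return 0;
}

static int demo_spawn_and_wait(struct demo_ctx *ctx, unsigned int id)
{
	struct task_struct *kt;

	init_completion(&ctx->done);
	kt = kthread_run(demo_thread, ctx, "demo_cl_%u", id);
	if (IS_ERR(kt))
		return PTR_ERR(kt);

	/* Block until demo_thread() calls complete(). */
	wait_for_completion(&ctx->done);
	return 0;
}
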
@@ -4812,7 +4602,7 @@ static void transport_generic_wait_for_tasks(
         */
        if (atomic_read(&cmd->transport_lun_stop)) {
 
-               DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+               pr_debug("wait_for_tasks: Stopping"
                        " wait_for_completion(&cmd->t_tasktransport_lun_fe"
                        "_stop_comp); for ITT: 0x%08x\n",
                        cmd->se_tfo->get_task_tag(cmd));
@@ -4834,7 +4624,7 @@ static void transport_generic_wait_for_tasks(
                 * struct se_cmd, now owns the structure and can be released through
                 * normal means below.
                 */
-               DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+               pr_debug("wait_for_tasks: Stopped"
                        " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
                        "stop_comp); for ITT: 0x%08x\n",
                        cmd->se_tfo->get_task_tag(cmd));
@@ -4847,7 +4637,7 @@ static void transport_generic_wait_for_tasks(
 
        atomic_set(&cmd->t_transport_stop, 1);
 
-       DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+       pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
                " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
                " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
@@ -4863,7 +4653,7 @@ static void transport_generic_wait_for_tasks(
        atomic_set(&cmd->t_transport_active, 0);
        atomic_set(&cmd->t_transport_stop, 0);
 
-       DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
+       pr_debug("wait_for_tasks: Stopped wait_for_completion("
                "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
                cmd->se_tfo->get_task_tag(cmd));
 remove:
@@ -5071,11 +4861,11 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        int ret = 0;
 
        if (atomic_read(&cmd->t_transport_aborted) != 0) {
-               if (!(send_status) ||
+               if (!send_status ||
                     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
                        return 1;
 #if 0
-               printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+               pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
                        " status for CDB: 0x%02x ITT: 0x%08x\n",
                        cmd->t_task_cdb[0],
                        cmd->se_tfo->get_task_tag(cmd));
@@ -5107,7 +4897,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 #if 0
-       printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+       pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
                " ITT: 0x%08x\n", cmd->t_task_cdb[0],
                cmd->se_tfo->get_task_tag(cmd));
 #endif
@@ -5145,7 +4935,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        default:
-               printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+               pr_err("Unknown TMR function: 0x%02x.\n",
                                tmr->function);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
@@ -5190,7 +4980,7 @@ static void transport_processing_shutdown(struct se_device *dev)
        spin_lock_irqsave(&dev->execute_task_lock, flags);
        while ((task = transport_get_task_from_state_list(dev))) {
                if (!task->task_se_cmd) {
-                       printk(KERN_ERR "task->task_se_cmd is NULL!\n");
+                       pr_err("task->task_se_cmd is NULL!\n");
                        continue;
                }
                cmd = task->task_se_cmd;
@@ -5199,18 +4989,18 @@ static void transport_processing_shutdown(struct se_device *dev)
 
                spin_lock_irqsave(&cmd->t_state_lock, flags);
 
-               DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
-                       " i_state/def_i_state: %d/%d, t_state/def_t_state:"
+               pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
+                       " i_state: %d, t_state/def_t_state:"
                        " %d/%d cdb: 0x%02x\n", cmd, task,
-                       cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
-                       cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
+                       cmd->se_tfo->get_task_tag(cmd),
+                       cmd->se_tfo->get_cmd_state(cmd),
                        cmd->t_state, cmd->deferred_t_state,
                        cmd->t_task_cdb[0]);
-               DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+               pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
                        " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
                        " t_transport_stop: %d t_transport_sent: %d\n",
                        cmd->se_tfo->get_task_tag(cmd),
-                       cmd->t_task_cdbs,
+                       cmd->t_task_list_num,
                        atomic_read(&cmd->t_task_cdbs_left),
                        atomic_read(&cmd->t_task_cdbs_sent),
                        atomic_read(&cmd->t_transport_active),
@@ -5222,10 +5012,10 @@ static void transport_processing_shutdown(struct se_device *dev)
                        spin_unlock_irqrestore(
                                &cmd->t_state_lock, flags);
 
-                       DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+                       pr_debug("Waiting for task: %p to shutdown for dev:"
                                " %p\n", task, dev);
                        wait_for_completion(&task->task_stop_comp);
-                       DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+                       pr_debug("Completed task: %p shutdown for dev: %p\n",
                                task, dev);
 
                        spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -5239,11 +5029,11 @@ static void transport_processing_shutdown(struct se_device *dev)
                }
                __transport_stop_task_timer(task, &flags);
 
-               if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
+               if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
                        spin_unlock_irqrestore(
                                        &cmd->t_state_lock, flags);
 
-                       DEBUG_DO("Skipping task: %p, dev: %p for"
+                       pr_debug("Skipping task: %p, dev: %p for"
                                " t_task_cdbs_ex_left: %d\n", task, dev,
                                atomic_read(&cmd->t_task_cdbs_ex_left));
 
@@ -5252,7 +5042,7 @@ static void transport_processing_shutdown(struct se_device *dev)
                }
 
                if (atomic_read(&cmd->t_transport_active)) {
-                       DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+                       pr_debug("got t_transport_active = 1 for task: %p, dev:"
                                        " %p\n", task, dev);
 
                        if (atomic_read(&cmd->t_fe_count)) {
@@ -5282,7 +5072,7 @@ static void transport_processing_shutdown(struct se_device *dev)
                        spin_lock_irqsave(&dev->execute_task_lock, flags);
                        continue;
                }
-               DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+               pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
                                task, dev);
 
                if (atomic_read(&cmd->t_fe_count)) {
@@ -5315,7 +5105,7 @@ static void transport_processing_shutdown(struct se_device *dev)
         */
        while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
 
-               DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+               pr_debug("From Device Queue: cmd: %p t_state: %d\n",
                                cmd, cmd->t_state);
 
                if (atomic_read(&cmd->t_fe_count)) {
@@ -5368,8 +5158,8 @@ get_cmd:
 
                switch (cmd->t_state) {
                case TRANSPORT_NEW_CMD_MAP:
-                       if (!(cmd->se_tfo->new_cmd_map)) {
-                               printk(KERN_ERR "cmd->se_tfo->new_cmd_map is"
+                       if (!cmd->se_tfo->new_cmd_map) {
+                               pr_err("cmd->se_tfo->new_cmd_map is"
                                        " NULL for TRANSPORT_NEW_CMD_MAP\n");
                                BUG();
                        }
@@ -5420,7 +5210,7 @@ get_cmd:
                        transport_generic_write_pending(cmd);
                        break;
                default:
-                       printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+                       pr_err("Unknown t_state: %d deferred_t_state:"
                                " %d for ITT: 0x%08x i_state: %d on SE LUN:"
                                " %u\n", cmd->t_state, cmd->deferred_t_state,
                                cmd->se_tfo->get_task_tag(cmd),