Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 22 Nov 2013 18:52:03 +0000 (10:52 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 22 Nov 2013 18:52:03 +0000 (10:52 -0800)
Pull SCSI target updates from Nicholas Bellinger:
 "Things have been quiet this round with mostly bugfixes, percpu
  conversions, and other minor iscsi-target conformance testing changes.

  The highlights include:

   - Add demo_mode_discovery attribute for iscsi-target (Thomas)
   - Convert tcm_fc(FCoE) to use percpu-ida pre-allocation
   - Add send completion interrupt coalescing for ib_isert
   - Convert target-core to use percpu-refcounting for se_lun
   - Fix mutex_trylock usage bug in iscsit_increment_maxcmdsn
   - tcm_loop updates (Hannes)
   - target-core ALUA cleanups + prep for v3.14 SCSI Referrals support (Hannes)

  v3.14 is currently shaping to be a busy development cycle in target
  land, with initial support for T10 Referrals and T10 DIF currently on
  the roadmap"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (40 commits)
  iscsi-target: chap auth shouldn't match username with trailing garbage
  iscsi-target: fix extract_param to handle buffer length corner case
  iscsi-target: Expose default_erl as TPG attribute
  target_core_configfs: split up ALUA supported states
  target_core_alua: Make supported states configurable
  target_core_alua: Store supported ALUA states
  target_core_alua: Rename ALUA_ACCESS_STATE_OPTIMIZED
  target_core_alua: spellcheck
  target core: rename (ex,im)plict -> (ex,im)plicit
  percpu-refcount: Add percpu-refcount.o to obj-y
  iscsi-target: Do not reject non-immediate CmdSNs exceeding MaxCmdSN
  iscsi-target: Convert iscsi_session statistics to atomic_long_t
  target: Convert se_device statistics to atomic_long_t
  target: Fix delayed Task Aborted Status (TAS) handling bug
  iscsi-target: Reject unsupported multi PDU text command sequence
  ib_isert: Avoid duplicate iscsit_increment_maxcmdsn call
  iscsi-target: Fix mutex_trylock usage in iscsit_increment_maxcmdsn
  target: Core does not need blkdev.h
  target: Pass through I/O topology for block backstores
  iser-target: Avoid using FRMR for single dma entry requests
  ...

12 files changed:
1  2 
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
drivers/usb/gadget/tcm_usb_gadget.c
drivers/vhost/scsi.c
lib/Makefile
lib/percpu_ida.c

index 6df23502059a44eccd32c725477cd5fee1ef2923,56610751eb28fe3769bc691236d25afd5a03ab54..6be57c38638d28dd0a39464bffe391c4a093f478
@@@ -22,6 -22,7 +22,7 @@@
  #include <linux/socket.h>
  #include <linux/in.h>
  #include <linux/in6.h>
+ #include <linux/llist.h>
  #include <rdma/ib_verbs.h>
  #include <rdma/rdma_cm.h>
  #include <target/target_core_base.h>
@@@ -489,6 -490,7 +490,7 @@@ isert_connect_request(struct rdma_cm_i
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
+       mutex_init(&isert_conn->conn_comp_mutex);
        spin_lock_init(&isert_conn->conn_lock);
  
        cma_id->context = isert_conn;
@@@ -594,7 -596,7 +596,7 @@@ isert_connect_release(struct isert_con
  
        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
  
 -      if (device->use_frwr)
 +      if (device && device->use_frwr)
                isert_conn_free_frwr_pool(isert_conn);
  
        if (isert_conn->conn_qp) {
@@@ -843,14 -845,32 +845,32 @@@ isert_init_tx_hdrs(struct isert_conn *i
  }
  
  static void
- isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
+ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+                  struct ib_send_wr *send_wr, bool coalesce)
  {
+       struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
-       send_wr->send_flags = IB_SEND_SIGNALED;
-       send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
+       send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
+       /*
+        * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
+        * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
+        */
+       mutex_lock(&isert_conn->conn_comp_mutex);
+       if (coalesce &&
+           ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+               llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
+               mutex_unlock(&isert_conn->conn_comp_mutex);
+               return;
+       }
+       isert_conn->conn_comp_batch = 0;
+       tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
+       mutex_unlock(&isert_conn->conn_comp_mutex);
+       send_wr->send_flags = IB_SEND_SIGNALED;
  }
  
  static int
@@@ -1582,8 -1602,8 +1602,8 @@@ isert_response_completion(struct iser_t
  }
  
  static void
- isert_send_completion(struct iser_tx_desc *tx_desc,
-                     struct isert_conn *isert_conn)
__isert_send_completion(struct iser_tx_desc *tx_desc,
+                       struct isert_conn *isert_conn)
  {
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
        }
  }
  
+ static void
+ isert_send_completion(struct iser_tx_desc *tx_desc,
+                     struct isert_conn *isert_conn)
+ {
+       struct llist_node *llnode = tx_desc->comp_llnode_batch;
+       struct iser_tx_desc *t;
+       /*
+        * Drain coalesced completion llist starting from comp_llnode_batch
+        * setup in isert_init_send_wr(), and then complete trailing tx_desc.
+        */
+       while (llnode) {
+               t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+               llnode = llist_next(llnode);
+               __isert_send_completion(t, isert_conn);
+       }
+       __isert_send_completion(tx_desc, isert_conn);
+ }
  static void
  isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
  {
@@@ -1793,7 -1831,7 +1831,7 @@@ isert_put_response(struct iscsi_conn *c
                isert_cmd->tx_desc.num_sge = 2;
        }
  
-       isert_init_send_wr(isert_cmd, send_wr);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
  
        pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
@@@ -1813,7 -1851,7 +1851,7 @@@ isert_put_nopin(struct iscsi_cmd *cmd, 
                               &isert_cmd->tx_desc.iscsi_header,
                               nopout_response);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_cmd, send_wr);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  
        pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
@@@ -1831,7 -1869,7 +1869,7 @@@ isert_put_logout_rsp(struct iscsi_cmd *
        iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
                                &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_cmd, send_wr);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  
        pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
@@@ -1849,7 -1887,7 +1887,7 @@@ isert_put_tm_rsp(struct iscsi_cmd *cmd
        iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
                                  &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_cmd, send_wr);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  
        pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
@@@ -1881,7 -1919,7 +1919,7 @@@ isert_put_reject(struct iscsi_cmd *cmd
        tx_dsg->lkey    = isert_conn->conn_mr->lkey;
        isert_cmd->tx_desc.num_sge = 2;
  
-       isert_init_send_wr(isert_cmd, send_wr);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  
        pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
@@@ -1921,7 -1959,7 +1959,7 @@@ isert_put_text_rsp(struct iscsi_cmd *cm
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                isert_cmd->tx_desc.num_sge = 2;
        }
-       isert_init_send_wr(isert_cmd, send_wr);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  
        pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
@@@ -1991,8 -2029,6 +2029,6 @@@ isert_map_rdma(struct iscsi_conn *conn
  
        if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
                data_left = se_cmd->data_length;
-               iscsit_increment_maxcmdsn(cmd, conn->sess);
-               cmd->stat_sn = conn->stat_sn++;
        } else {
                sg_off = cmd->write_data_done / PAGE_SIZE;
                data_left = se_cmd->data_length - cmd->write_data_done;
@@@ -2204,8 -2240,6 +2240,6 @@@ isert_reg_rdma_frwr(struct iscsi_conn *
  
        if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
                data_left = se_cmd->data_length;
-               iscsit_increment_maxcmdsn(cmd, conn->sess);
-               cmd->stat_sn = conn->stat_sn++;
        } else {
                sg_off = cmd->write_data_done / PAGE_SIZE;
                data_left = se_cmd->data_length - cmd->write_data_done;
        data_len = min(data_left, rdma_write_max);
        wr->cur_rdma_length = data_len;
  
-       spin_lock_irqsave(&isert_conn->conn_lock, flags);
-       fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
-                                  struct fast_reg_descriptor, list);
-       list_del(&fr_desc->list);
-       spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-       wr->fr_desc = fr_desc;
+       /* if there is a single dma entry, dma mr is sufficient */
+       if (count == 1) {
+               ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
+               ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
+               ib_sge->lkey = isert_conn->conn_mr->lkey;
+               wr->fr_desc = NULL;
+       } else {
+               spin_lock_irqsave(&isert_conn->conn_lock, flags);
+               fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+                                          struct fast_reg_descriptor, list);
+               list_del(&fr_desc->list);
+               spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+               wr->fr_desc = fr_desc;
  
-       ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-                         ib_sge, offset, data_len);
-       if (ret) {
-               list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-               goto unmap_sg;
+               ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
+                                 ib_sge, offset, data_len);
+               if (ret) {
+                       list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+                       goto unmap_sg;
+               }
        }
  
        return 0;
@@@ -2306,10 -2348,11 +2348,11 @@@ isert_put_datain(struct iscsi_conn *con
         * Build isert_conn->tx_desc for iSCSI response PDU and attach
         */
        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-       iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
+       iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
                             &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+       isert_init_send_wr(isert_conn, isert_cmd,
+                          &isert_cmd->tx_desc.send_wr, true);
  
        atomic_inc(&isert_conn->post_send_buf_count);
  
index 6c923c7039a156d10eeaa7a39f339ecd84a360ac,16087966cb7d75c9b83dc6a017ec7e857917ff4e..520a7e5a490b1b61042cad7acead955bbed2b759
@@@ -1352,11 -1352,8 +1352,8 @@@ static int srpt_abort_cmd(struct srpt_s
  
                /* XXX(hch): this is a horrible layering violation.. */
                spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-               ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
                ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
                spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-               complete(&ioctx->cmd.transport_lun_stop_comp);
                break;
        case SRPT_STATE_CMD_RSP_SENT:
                /*
                 * not been received in time.
                 */
                srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-               spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-               ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
-               spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
                target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
                break;
        case SRPT_STATE_MGMT_RSP_SENT:
@@@ -1476,7 -1470,6 +1470,6 @@@ static void srpt_handle_rdma_err_comp(s
  {
        struct se_cmd *cmd;
        enum srpt_command_state state;
-       unsigned long flags;
  
        cmd = &ioctx->cmd;
        state = srpt_get_cmd_state(ioctx);
                               __func__, __LINE__, state);
                break;
        case SRPT_RDMA_WRITE_LAST:
-               spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-               ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
-               spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
                break;
        default:
                printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
@@@ -1588,7 -1578,7 +1578,7 @@@ static int srpt_build_tskmgmt_rsp(struc
        int resp_data_len;
        int resp_len;
  
 -      resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
 +      resp_data_len = 4;
        resp_len = sizeof(*srp_rsp) + resp_data_len;
  
        srp_rsp = ioctx->ioctx.buf;
                                    + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;
  
 -      if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
 -              srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
 -              srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
 -              srp_rsp->data[3] = rsp_code;
 -      }
 +      srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
 +      srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
 +      srp_rsp->data[3] = rsp_code;
  
        return resp_len;
  }
@@@ -2356,8 -2348,6 +2346,8 @@@ static void srpt_release_channel_work(s
        transport_deregister_session(se_sess);
        ch->sess = NULL;
  
 +      ib_destroy_cm_id(ch->cm_id);
 +
        srpt_destroy_ch_ib(ch);
  
        srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
        list_del(&ch->list);
        spin_unlock_irq(&sdev->spinlock);
  
 -      ib_destroy_cm_id(ch->cm_id);
 -
        if (ch->release_done)
                complete(ch->release_done);
  
index 38e44b9abf0f145eacde1e9b90dcf26c45ca2633,bf76fc46fc52b2fd3193d391c24ee05cce57b0d4..d70e9119e906cba0c0f38da61948ce745bc5d6e5
@@@ -753,8 -753,7 +753,8 @@@ static void iscsit_unmap_iovec(struct i
  
  static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
  {
 -      struct iscsi_cmd *cmd;
 +      LIST_HEAD(ack_list);
 +      struct iscsi_cmd *cmd, *cmd_p;
  
        conn->exp_statsn = exp_statsn;
  
                return;
  
        spin_lock_bh(&conn->cmd_lock);
 -      list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 +      list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
                spin_lock(&cmd->istate_lock);
                if ((cmd->i_state == ISTATE_SENT_STATUS) &&
                    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
                        cmd->i_state = ISTATE_REMOVE;
                        spin_unlock(&cmd->istate_lock);
 -                      iscsit_add_cmd_to_immediate_queue(cmd, conn,
 -                                              cmd->i_state);
 +                      list_move_tail(&cmd->i_conn_node, &ack_list);
                        continue;
                }
                spin_unlock(&cmd->istate_lock);
        }
        spin_unlock_bh(&conn->cmd_lock);
 +
 +      list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
 +              list_del(&cmd->i_conn_node);
 +              iscsit_free_cmd(cmd, false);
 +      }
  }
  
  static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
@@@ -805,14 -800,7 +805,7 @@@ int iscsit_setup_scsi_cmd(struct iscsi_
        int iscsi_task_attr;
        int sam_task_attr;
  
-       spin_lock_bh(&conn->sess->session_stats_lock);
-       conn->sess->cmd_pdus++;
-       if (conn->sess->se_sess->se_node_acl) {
-               spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-               conn->sess->se_sess->se_node_acl->num_cmds++;
-               spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-       }
-       spin_unlock_bh(&conn->sess->session_stats_lock);
+       atomic_long_inc(&conn->sess->cmd_pdus);
  
        hdr                     = (struct iscsi_scsi_req *) buf;
        payload_length          = ntoh24(hdr->dlength);
@@@ -1254,20 -1242,12 +1247,12 @@@ iscsit_check_dataout_hdr(struct iscsi_c
        int rc;
  
        if (!payload_length) {
-               pr_err("DataOUT payload is ZERO, protocol error.\n");
-               return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
-                                        buf);
+               pr_warn("DataOUT payload is ZERO, ignoring.\n");
+               return 0;
        }
  
        /* iSCSI write */
-       spin_lock_bh(&conn->sess->session_stats_lock);
-       conn->sess->rx_data_octets += payload_length;
-       if (conn->sess->se_sess->se_node_acl) {
-               spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-               conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
-               spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-       }
-       spin_unlock_bh(&conn->sess->session_stats_lock);
+       atomic_long_add(payload_length, &conn->sess->rx_data_octets);
  
        if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
                pr_err("DataSegmentLength: %u is greater than"
@@@ -1486,7 -1466,7 +1471,7 @@@ EXPORT_SYMBOL(iscsit_check_dataout_payl
  
  static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
  {
-       struct iscsi_cmd *cmd;
+       struct iscsi_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        int rc;
        bool data_crc_failed = false;
@@@ -1954,6 -1934,13 +1939,13 @@@ iscsit_setup_text_cmd(struct iscsi_con
                                         (unsigned char *)hdr);
        }
  
+       if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+            (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+               pr_err("Multi sequence text commands currently not supported\n");
+               return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+                                       (unsigned char *)hdr);
+       }
        pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
                " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
                hdr->exp_statsn, payload_length);
@@@ -2630,14 -2617,7 +2622,7 @@@ static int iscsit_send_datain(struct is
                return -1;
        }
  
-       spin_lock_bh(&conn->sess->session_stats_lock);
-       conn->sess->tx_data_octets += datain.length;
-       if (conn->sess->se_sess->se_node_acl) {
-               spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
-               conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
-               spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
-       }
-       spin_unlock_bh(&conn->sess->session_stats_lock);
+       atomic_long_add(datain.length, &conn->sess->tx_data_octets);
        /*
         * Special case for successfully execution w/ both DATAIN
         * and Sense Data.
@@@ -3162,9 -3142,7 +3147,7 @@@ void iscsit_build_rsp_pdu(struct iscsi_
        if (inc_stat_sn)
                cmd->stat_sn = conn->stat_sn++;
  
-       spin_lock_bh(&conn->sess->session_stats_lock);
-       conn->sess->rsp_pdus++;
-       spin_unlock_bh(&conn->sess->session_stats_lock);
+       atomic_long_inc(&conn->sess->rsp_pdus);
  
        memset(hdr, 0, ISCSI_HDR_LEN);
        hdr->opcode             = ISCSI_OP_SCSI_CMD_RSP;
@@@ -3374,6 -3352,7 +3357,7 @@@ static int iscsit_build_sendtargets_res
        struct iscsi_tiqn *tiqn;
        struct iscsi_tpg_np *tpg_np;
        int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+       int target_name_printed;
        unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
        unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
  
                        continue;
                }
  
-               len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
-               len += 1;
-               if ((len + payload_len) > buffer_len) {
-                       end_of_buf = 1;
-                       goto eob;
-               }
-               memcpy(payload + payload_len, buf, len);
-               payload_len += len;
+               target_name_printed = 0;
  
                spin_lock(&tiqn->tiqn_tpg_lock);
                list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
  
+                       /* If demo_mode_discovery=0 and generate_node_acls=0
+                        * (demo mode disabled) do not return
+                        * TargetName+TargetAddress unless a NodeACL exists.
+                        */
+                       if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+                           (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+                           (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
+                               cmd->conn->sess->sess_ops->InitiatorName))) {
+                               continue;
+                       }
                        spin_lock(&tpg->tpg_state_lock);
                        if ((tpg->tpg_state == TPG_STATE_FREE) ||
                            (tpg->tpg_state == TPG_STATE_INACTIVE)) {
                                struct iscsi_np *np = tpg_np->tpg_np;
                                bool inaddr_any = iscsit_check_inaddr_any(np);
  
+                               if (!target_name_printed) {
+                                       len = sprintf(buf, "TargetName=%s",
+                                                     tiqn->tiqn);
+                                       len += 1;
+                                       if ((len + payload_len) > buffer_len) {
+                                               spin_unlock(&tpg->tpg_np_lock);
+                                               spin_unlock(&tiqn->tiqn_tpg_lock);
+                                               end_of_buf = 1;
+                                               goto eob;
+                                       }
+                                       memcpy(payload + payload_len, buf, len);
+                                       payload_len += len;
+                                       target_name_printed = 1;
+                               }
                                len = sprintf(buf, "TargetAddress="
                                        "%s:%hu,%hu",
                                        (inaddr_any == false) ?
@@@ -4092,9 -4091,7 +4096,7 @@@ restart
                                 * hit default in the switch below.
                                 */
                                memset(buffer, 0xff, ISCSI_HDR_LEN);
-                               spin_lock_bh(&conn->sess->session_stats_lock);
-                               conn->sess->conn_digest_errors++;
-                               spin_unlock_bh(&conn->sess->session_stats_lock);
+                               atomic_long_inc(&conn->sess->conn_digest_errors);
                        } else {
                                pr_debug("Got HeaderDigest CRC32C"
                                                " 0x%08x\n", checksum);
@@@ -4381,7 -4378,7 +4383,7 @@@ int iscsit_close_connection
  
  int iscsit_close_session(struct iscsi_session *sess)
  {
-       struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+       struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
  
        if (atomic_read(&sess->nconn)) {
index ef6d836a4d09745d57ba42e2496dd725f85c57ea,635751be5af73eadbd258347ae5a2bc020803246..83c965c65386da1ce9b8772d5fe3b7193f852f13
@@@ -88,7 -88,7 +88,7 @@@ int extract_param
        if (len < 0)
                return -1;
  
-       if (len > max_length) {
+       if (len >= max_length) {
                pr_err("Length of input: %d exceeds max_length:"
                        " %d\n", len, max_length);
                return -1;
@@@ -140,7 -140,7 +140,7 @@@ static u32 iscsi_handle_authentication
                        iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
                                                  se_node_acl);
  
-                       auth = ISCSI_NODE_AUTH(iscsi_nacl);
+                       auth = &iscsi_nacl->node_auth;
                }
        } else {
                /*
@@@ -789,7 -789,7 +789,7 @@@ static int iscsi_target_handle_csg_zero
                return -1;
  
        if (!iscsi_check_negotiated_keys(conn->param_list)) {
-               if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+               if (conn->tpg->tpg_attrib.authentication &&
                    !strncmp(param->value, NONE, 4)) {
                        pr_err("Initiator sent AuthMethod=None but"
                                " Target is enforcing iSCSI Authentication,"
                        return -1;
                }
  
-               if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+               if (conn->tpg->tpg_attrib.authentication &&
                    !login->auth_complete)
                        return 0;
  
@@@ -862,7 -862,7 +862,7 @@@ static int iscsi_target_handle_csg_one(
        }
  
        if (!login->auth_complete &&
-            ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+            conn->tpg->tpg_attrib.authentication) {
                pr_err("Initiator is requesting CSG: 1, has not been"
                         " successfully authenticated, and the Target is"
                        " enforcing iSCSI Authentication, login failed.\n");
@@@ -1192,7 -1192,7 +1192,7 @@@ get_target
         */
  alloc_tags:
        tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
 -      tag_num += ISCSIT_EXTRA_TAGS;
 +      tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
        tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
  
        ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
index b0cac0c342e1e83a9da5dc7b5f01edd48cb12136,9064926a037029365240f2f55876c43b95ff72ff..0819e688a3986586200a3131380445a084ff5291
@@@ -242,9 -242,9 +242,9 @@@ static inline int iscsit_check_received
         */
        if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
                pr_err("Received CmdSN: 0x%08x is greater than"
-                      " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+                      " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
                       sess->max_cmd_sn);
-               ret = CMDSN_ERROR_CANNOT_RECOVER;
+               ret = CMDSN_MAXCMDSN_OVERRUN;
  
        } else if (cmdsn == sess->exp_cmd_sn) {
                sess->exp_cmd_sn++;
@@@ -303,14 -303,16 +303,16 @@@ int iscsit_sequence_cmd(struct iscsi_co
                ret = CMDSN_HIGHER_THAN_EXP;
                break;
        case CMDSN_LOWER_THAN_EXP:
+       case CMDSN_MAXCMDSN_OVERRUN:
+       default:
                cmd->i_state = ISTATE_REMOVE;
                iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
-               ret = cmdsn_ret;
-               break;
-       default:
-               reason = ISCSI_REASON_PROTOCOL_ERROR;
-               reject = true;
-               ret = cmdsn_ret;
+               /*
+                * Existing callers for iscsit_sequence_cmd() will silently
+                * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
+                * return for CMDSN_MAXCMDSN_OVERRUN as well.
+                */
+               ret = CMDSN_LOWER_THAN_EXP;
                break;
        }
        mutex_unlock(&conn->sess->cmdsn_mutex);
@@@ -736,7 -738,7 +738,7 @@@ void iscsit_free_cmd(struct iscsi_cmd *
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
 -              rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
 +              rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
                if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
                        __iscsit_free_cmd(cmd, true, shutdown);
                        target_put_sess_cmd(se_cmd->se_sess, se_cmd);
                        se_cmd = &cmd->se_cmd;
                        __iscsit_free_cmd(cmd, true, shutdown);
  
 -                      rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
 +                      rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
                        if (!rc && shutdown && se_cmd->se_sess) {
                                __iscsit_free_cmd(cmd, true, shutdown);
                                target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@@ -980,7 -982,7 +982,7 @@@ static void iscsit_handle_nopin_respons
                tiqn->sess_err_stats.last_sess_failure_type =
                                ISCSI_SESS_ERR_CXN_TIMEOUT;
                tiqn->sess_err_stats.cxn_timeout_errors++;
-               conn->sess->conn_timeout_errors++;
+               atomic_long_inc(&conn->sess->conn_timeout_errors);
                spin_unlock_bh(&tiqn->sess_err_stats.lock);
        }
        }
index d9b92b2c524d4f055a035f4343175fbdd3877c4c,61a30f0d7583395803f11280ebb65a5c7c33d66b..52ae54e60105652df99df8e64a619a5ba9958ab6
@@@ -105,12 -105,22 +105,22 @@@ sbc_emulate_readcapacity_16(struct se_c
        buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
        buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
        buf[11] = dev->dev_attrib.block_size & 0xff;
+       if (dev->transport->get_lbppbe)
+               buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+       if (dev->transport->get_alignment_offset_lbas) {
+               u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+               buf[14] = (lalba >> 8) & 0x3f;
+               buf[15] = lalba & 0xff;
+       }
        /*
         * Set Thin Provisioning Enable bit following sbc3r22 in section
         * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
         */
        if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
-               buf[14] = 0x80;
+               buf[14] |= 0x80;
  
        rbuf = transport_kmap_data_sg(cmd);
        if (rbuf) {
@@@ -263,11 -273,6 +273,11 @@@ sbc_setup_write_same(struct se_cmd *cmd
                        sectors, cmd->se_dev->dev_attrib.max_write_same_len);
                return TCM_INVALID_CDB_FIELD;
        }
 +      /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
 +      if (flags[0] & 0x10) {
 +              pr_warn("WRITE SAME with ANCHOR not supported\n");
 +              return TCM_INVALID_CDB_FIELD;
 +      }
        /*
         * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
         * translated into block discard requests within backend code.
@@@ -354,16 -359,7 +364,16 @@@ static sense_reason_t compare_and_write
  {
        struct se_device *dev = cmd->se_dev;
  
 -      cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
 +      /*
 +       * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
 +       * within target_complete_ok_work() if the command was successfully
 +       * sent to the backend driver.
 +       */
 +      spin_lock_irq(&cmd->t_state_lock);
 +      if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
 +              cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
 +      spin_unlock_irq(&cmd->t_state_lock);
 +
        /*
         * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
         * before the original READ I/O submission.
@@@ -377,7 -373,7 +387,7 @@@ static sense_reason_t compare_and_write
  {
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *write_sg = NULL, *sg;
 -      unsigned char *buf, *addr;
 +      unsigned char *buf = NULL, *addr;
        struct sg_mapping_iter m;
        unsigned int offset = 0, len;
        unsigned int nlbas = cmd->t_task_nolb;
         */
        if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
                return TCM_NO_SENSE;
 +      /*
 +       * Immediately exit + release dev->caw_sem if command has already
 +       * been failed with a non-zero SCSI status.
 +       */
 +      if (cmd->scsi_status) {
 +              pr_err("compare_and_write_callback: non zero scsi_status:"
 +                      " 0x%02x\n", cmd->scsi_status);
 +              goto out;
 +      }
  
        buf = kzalloc(cmd->data_length, GFP_KERNEL);
        if (!buf) {
@@@ -531,12 -518,6 +541,12 @@@ sbc_compare_and_write(struct se_cmd *cm
                cmd->transport_complete_callback = NULL;
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 +      /*
 +       * Reset cmd->data_length to individual block_size in order to not
 +       * confuse backend drivers that depend on this value matching the
 +       * size of the I/O being submitted.
 +       */
 +      cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
  
        ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
                              DMA_FROM_DEVICE);
index 81e945eefbbdd0572181d84e3cd9d014895e4eb7,269a7987dab9d780c931e9ad9ac3dfde919265c6..91953da0f62329af488a91eaa1d83be6fda68f08
@@@ -28,7 -28,6 +28,6 @@@
  #include <linux/string.h>
  #include <linux/timer.h>
  #include <linux/slab.h>
- #include <linux/blkdev.h>
  #include <linux/spinlock.h>
  #include <linux/kthread.h>
  #include <linux/in.h>
@@@ -236,24 -235,17 +235,24 @@@ int transport_alloc_session_tags(struc
  {
        int rc;
  
 -      se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL);
 +      se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
 +                                      GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!se_sess->sess_cmd_map) {
 -              pr_err("Unable to allocate se_sess->sess_cmd_map\n");
 -              return -ENOMEM;
 +              se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
 +              if (!se_sess->sess_cmd_map) {
 +                      pr_err("Unable to allocate se_sess->sess_cmd_map\n");
 +                      return -ENOMEM;
 +              }
        }
  
        rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
        if (rc < 0) {
                pr_err("Unable to init se_sess->sess_tag_pool,"
                        " tag_num: %u\n", tag_num);
 -              kfree(se_sess->sess_cmd_map);
 +              if (is_vmalloc_addr(se_sess->sess_cmd_map))
 +                      vfree(se_sess->sess_cmd_map);
 +              else
 +                      kfree(se_sess->sess_cmd_map);
                se_sess->sess_cmd_map = NULL;
                return -ENOMEM;
        }
@@@ -419,10 -411,7 +418,10 @@@ void transport_free_session(struct se_s
  {
        if (se_sess->sess_cmd_map) {
                percpu_ida_destroy(&se_sess->sess_tag_pool);
 -              kfree(se_sess->sess_cmd_map);
 +              if (is_vmalloc_addr(se_sess->sess_cmd_map))
 +                      vfree(se_sess->sess_cmd_map);
 +              else
 +                      kfree(se_sess->sess_cmd_map);
        }
        kmem_cache_free(se_sess_cache, se_sess);
  }
@@@ -473,7 -462,7 +472,7 @@@ void transport_deregister_session(struc
        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
        /*
-        * If last kref is dropping now for an explict NodeACL, awake sleeping
+        * If last kref is dropping now for an explicit NodeACL, awake sleeping
         * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
         * removal context.
         */
@@@ -515,23 -504,6 +514,6 @@@ static int transport_cmd_check_stop(str
        if (write_pending)
                cmd->t_state = TRANSPORT_WRITE_PENDING;
  
-       /*
-        * Determine if IOCTL context caller in requesting the stopping of this
-        * command for LUN shutdown purposes.
-        */
-       if (cmd->transport_state & CMD_T_LUN_STOP) {
-               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-               cmd->transport_state &= ~CMD_T_ACTIVE;
-               if (remove_from_lists)
-                       target_remove_from_state_list(cmd);
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               complete(&cmd->transport_lun_stop_comp);
-               return 1;
-       }
        if (remove_from_lists) {
                target_remove_from_state_list(cmd);
  
@@@ -585,15 -557,11 +567,11 @@@ static int transport_cmd_check_stop_to_
  static void transport_lun_remove_cmd(struct se_cmd *cmd)
  {
        struct se_lun *lun = cmd->se_lun;
-       unsigned long flags;
  
-       if (!lun)
+       if (!lun || !cmd->lun_ref_active)
                return;
  
-       spin_lock_irqsave(&lun->lun_cmd_lock, flags);
-       if (!list_empty(&cmd->se_lun_node))
-               list_del_init(&cmd->se_lun_node);
-       spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+       percpu_ref_put(&lun->lun_ref);
  }
  
  void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@@ -668,7 -636,7 +646,7 @@@ void target_complete_cmd(struct se_cmd 
                cmd->transport_state |= CMD_T_FAILED;
  
        /*
-        * Check for case where an explict ABORT_TASK has been received
+        * Check for case where an explicit ABORT_TASK has been received
         * and transport_wait_for_tasks() will be waiting for completion..
         */
        if (cmd->transport_state & CMD_T_ABORTED &&
@@@ -1092,13 -1060,10 +1070,10 @@@ void transport_init_se_cmd
        int task_attr,
        unsigned char *sense_buffer)
  {
-       INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
        INIT_LIST_HEAD(&cmd->se_cmd_list);
        INIT_LIST_HEAD(&cmd->state_list);
-       init_completion(&cmd->transport_lun_fe_stop_comp);
-       init_completion(&cmd->transport_lun_stop_comp);
        init_completion(&cmd->t_transport_stop_comp);
        init_completion(&cmd->cmd_wait_comp);
        init_completion(&cmd->task_stop_comp);
@@@ -1719,29 -1684,14 +1694,14 @@@ void target_execute_cmd(struct se_cmd *
        /*
         * If the received CDB has aleady been aborted stop processing it here.
         */
-       if (transport_check_aborted_status(cmd, 1)) {
-               complete(&cmd->transport_lun_stop_comp);
+       if (transport_check_aborted_status(cmd, 1))
                return;
-       }
  
-       /*
-        * Determine if IOCTL context caller in requesting the stopping of this
-        * command for LUN shutdown purposes.
-        */
-       spin_lock_irq(&cmd->t_state_lock);
-       if (cmd->transport_state & CMD_T_LUN_STOP) {
-               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-               cmd->transport_state &= ~CMD_T_ACTIVE;
-               spin_unlock_irq(&cmd->t_state_lock);
-               complete(&cmd->transport_lun_stop_comp);
-               return;
-       }
        /*
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
         */
+       spin_lock_irq(&cmd->t_state_lock);
        if (cmd->transport_state & CMD_T_STOP) {
                pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
                        __func__, __LINE__,
@@@ -2404,164 -2354,23 +2364,23 @@@ void target_wait_for_sess_cmds(struct s
  }
  EXPORT_SYMBOL(target_wait_for_sess_cmds);
  
- /*    transport_lun_wait_for_tasks():
-  *
-  *    Called from ConfigFS context to stop the passed struct se_cmd to allow
-  *    an struct se_lun to be successfully shutdown.
-  */
- static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
- {
-       unsigned long flags;
-       int ret = 0;
-       /*
-        * If the frontend has already requested this struct se_cmd to
-        * be stopped, we can safely ignore this struct se_cmd.
-        */
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (cmd->transport_state & CMD_T_STOP) {
-               cmd->transport_state &= ~CMD_T_LUN_STOP;
-               pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
-                        cmd->se_tfo->get_task_tag(cmd));
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               transport_cmd_check_stop(cmd, false, false);
-               return -EPERM;
-       }
-       cmd->transport_state |= CMD_T_LUN_FE_STOP;
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-       // XXX: audit task_flags checks.
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if ((cmd->transport_state & CMD_T_BUSY) &&
-           (cmd->transport_state & CMD_T_SENT)) {
-               if (!target_stop_cmd(cmd, &flags))
-                       ret++;
-       }
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-       pr_debug("ConfigFS: cmd: %p stop tasks ret:"
-                       " %d\n", cmd, ret);
-       if (!ret) {
-               pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
-                               cmd->se_tfo->get_task_tag(cmd));
-               wait_for_completion(&cmd->transport_lun_stop_comp);
-               pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
-                               cmd->se_tfo->get_task_tag(cmd));
-       }
-       return 0;
- }
- static void __transport_clear_lun_from_sessions(struct se_lun *lun)
- {
-       struct se_cmd *cmd = NULL;
-       unsigned long lun_flags, cmd_flags;
-       /*
-        * Do exception processing and return CHECK_CONDITION status to the
-        * Initiator Port.
-        */
-       spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-       while (!list_empty(&lun->lun_cmd_list)) {
-               cmd = list_first_entry(&lun->lun_cmd_list,
-                      struct se_cmd, se_lun_node);
-               list_del_init(&cmd->se_lun_node);
-               spin_lock(&cmd->t_state_lock);
-               pr_debug("SE_LUN[%d] - Setting cmd->transport"
-                       "_lun_stop for  ITT: 0x%08x\n",
-                       cmd->se_lun->unpacked_lun,
-                       cmd->se_tfo->get_task_tag(cmd));
-               cmd->transport_state |= CMD_T_LUN_STOP;
-               spin_unlock(&cmd->t_state_lock);
-               spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-               if (!cmd->se_lun) {
-                       pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
-                               cmd->se_tfo->get_task_tag(cmd),
-                               cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-                       BUG();
-               }
-               /*
-                * If the Storage engine still owns the iscsi_cmd_t, determine
-                * and/or stop its context.
-                */
-               pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
-                       "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
-                       cmd->se_tfo->get_task_tag(cmd));
-               if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
-                       spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-                       continue;
-               }
-               pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
-                       "_wait_for_tasks(): SUCCESS\n",
-                       cmd->se_lun->unpacked_lun,
-                       cmd->se_tfo->get_task_tag(cmd));
-               spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-               if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
-                       spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-                       goto check_cond;
-               }
-               cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
-               target_remove_from_state_list(cmd);
-               spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-               /*
-                * The Storage engine stopped this struct se_cmd before it was
-                * send to the fabric frontend for delivery back to the
-                * Initiator Node.  Return this SCSI CDB back with an
-                * CHECK_CONDITION status.
-                */
- check_cond:
-               transport_send_check_condition_and_sense(cmd,
-                               TCM_NON_EXISTENT_LUN, 0);
-               /*
-                *  If the fabric frontend is waiting for this iscsi_cmd_t to
-                * be released, notify the waiting thread now that LU has
-                * finished accessing it.
-                */
-               spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-               if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
-                       pr_debug("SE_LUN[%d] - Detected FE stop for"
-                               " struct se_cmd: %p ITT: 0x%08x\n",
-                               lun->unpacked_lun,
-                               cmd, cmd->se_tfo->get_task_tag(cmd));
-                       spin_unlock_irqrestore(&cmd->t_state_lock,
-                                       cmd_flags);
-                       transport_cmd_check_stop(cmd, false, false);
-                       complete(&cmd->transport_lun_fe_stop_comp);
-                       spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-                       continue;
-               }
-               pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
-                       lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
-               spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-               spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-       }
-       spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
- }
- static int transport_clear_lun_thread(void *p)
+ static int transport_clear_lun_ref_thread(void *p)
  {
        struct se_lun *lun = p;
  
-       __transport_clear_lun_from_sessions(lun);
+       percpu_ref_kill(&lun->lun_ref);
+       wait_for_completion(&lun->lun_ref_comp);
        complete(&lun->lun_shutdown_comp);
  
        return 0;
  }
  
- int transport_clear_lun_from_sessions(struct se_lun *lun)
+ int transport_clear_lun_ref(struct se_lun *lun)
  {
        struct task_struct *kt;
  
-       kt = kthread_run(transport_clear_lun_thread, lun,
+       kt = kthread_run(transport_clear_lun_ref_thread, lun,
                        "tcm_cl_%u", lun->unpacked_lun);
        if (IS_ERR(kt)) {
                pr_err("Unable to start clear_lun thread\n");
@@@ -2595,43 -2404,6 +2414,6 @@@ bool transport_wait_for_tasks(struct se
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return false;
        }
-       /*
-        * If we are already stopped due to an external event (ie: LUN shutdown)
-        * sleep until the connection can have the passed struct se_cmd back.
-        * The cmd->transport_lun_stopped_sem will be upped by
-        * transport_clear_lun_from_sessions() once the ConfigFS context caller
-        * has completed its operation on the struct se_cmd.
-        */
-       if (cmd->transport_state & CMD_T_LUN_STOP) {
-               pr_debug("wait_for_tasks: Stopping"
-                       " wait_for_completion(&cmd->t_tasktransport_lun_fe"
-                       "_stop_comp); for ITT: 0x%08x\n",
-                       cmd->se_tfo->get_task_tag(cmd));
-               /*
-                * There is a special case for WRITES where a FE exception +
-                * LUN shutdown means ConfigFS context is still sleeping on
-                * transport_lun_stop_comp in transport_lun_wait_for_tasks().
-                * We go ahead and up transport_lun_stop_comp just to be sure
-                * here.
-                */
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               complete(&cmd->transport_lun_stop_comp);
-               wait_for_completion(&cmd->transport_lun_fe_stop_comp);
-               spin_lock_irqsave(&cmd->t_state_lock, flags);
-               target_remove_from_state_list(cmd);
-               /*
-                * At this point, the frontend who was the originator of this
-                * struct se_cmd, now owns the structure and can be released through
-                * normal means below.
-                */
-               pr_debug("wait_for_tasks: Stopped"
-                       " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
-                       "stop_comp); for ITT: 0x%08x\n",
-                       cmd->se_tfo->get_task_tag(cmd));
-               cmd->transport_state &= ~CMD_T_LUN_STOP;
-       }
  
        if (!(cmd->transport_state & CMD_T_ACTIVE)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@@ -2910,6 -2682,7 +2692,7 @@@ int transport_check_aborted_status(stru
                 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
  
        cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
        trace_target_cmd_complete(cmd);
        cmd->se_tfo->queue_status(cmd);
  
@@@ -2938,6 -2711,7 +2721,7 @@@ void transport_send_task_abort(struct s
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        cmd->transport_state |= CMD_T_ABORTED;
                        smp_mb__after_atomic_inc();
+                       return;
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
index 474cd44fac14d61b530a6100d5e6060086777560,d67304a6aa9e679fc0e28189fa2d0f74c89587a6..6b88a9958f6126ab9267cc1c23e97dd8e59b5a57
@@@ -82,9 -82,6 +82,9 @@@ static int target_xcopy_locate_se_dev_e
        mutex_lock(&g_device_mutex);
        list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
  
 +              if (!se_dev->dev_attrib.emulate_3pc)
 +                      continue;
 +
                memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
                target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
  
@@@ -301,8 -298,8 +301,8 @@@ static int target_xcopy_parse_segdesc_0
                (unsigned long long)xop->dst_lba);
  
        if (dc != 0) {
 -              xop->dbl = (desc[29] << 16) & 0xff;
 -              xop->dbl |= (desc[30] << 8) & 0xff;
 +              xop->dbl = (desc[29] & 0xff) << 16;
 +              xop->dbl |= (desc[30] & 0xff) << 8;
                xop->dbl |= desc[31] & 0xff;
  
                pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
@@@ -360,7 -357,6 +360,7 @@@ struct xcopy_pt_cmd 
        struct se_cmd se_cmd;
        struct xcopy_op *xcopy_op;
        struct completion xpt_passthrough_sem;
 +      unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
  };
  
  static struct se_port xcopy_pt_port;
@@@ -405,9 -401,6 +405,6 @@@ static void xcopy_pt_release_cmd(struc
        struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
                                struct xcopy_pt_cmd, se_cmd);
  
-       if (xpt_cmd->remote_port)
-               kfree(se_cmd->se_lun);
        kfree(xpt_cmd);
  }
  
@@@ -572,22 -565,10 +569,10 @@@ static int target_xcopy_init_pt_lun
                return 0;
        }
  
-       pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
-       if (!pt_cmd->se_lun) {
-               pr_err("Unable to allocate pt_cmd->se_lun\n");
-               return -ENOMEM;
-       }
-       init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
-       INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
-       INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
-       spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
-       spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
-       spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);
+       pt_cmd->se_lun = &se_dev->xcopy_lun;
        pt_cmd->se_dev = se_dev;
  
        pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
-       pt_cmd->se_lun->lun_se_dev = se_dev;
        pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
  
        pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
@@@ -658,8 -639,6 +643,6 @@@ static int target_xcopy_setup_pt_cmd
        return 0;
  
  out:
-       if (remote_port == true)
-               kfree(cmd->se_lun);
        return ret;
  }
  
@@@ -679,8 -658,7 +662,8 @@@ static int target_xcopy_issue_pt_cmd(st
  
        pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
                        se_cmd->scsi_status);
 -      return 0;
 +
 +      return (se_cmd->scsi_status) ? -EINVAL : 0;
  }
  
  static int target_xcopy_read_source(
                (unsigned long long)src_lba, src_sectors, length);
  
        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
 -                              DMA_FROM_DEVICE, 0, NULL);
 +                            DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
        xop->src_pt_cmd = xpt_cmd;
  
        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@@ -773,7 -751,7 +756,7 @@@ static int target_xcopy_write_destinati
                (unsigned long long)dst_lba, dst_sectors, length);
  
        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
 -                              DMA_TO_DEVICE, 0, NULL);
 +                            DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
        xop->dst_pt_cmd = xpt_cmd;
  
        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@@ -889,42 -867,30 +872,42 @@@ out
  
  sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
  {
 +      struct se_device *dev = se_cmd->se_dev;
        struct xcopy_op *xop = NULL;
        unsigned char *p = NULL, *seg_desc;
        unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
 +      sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
        int rc;
        unsigned short tdll;
  
 +      if (!dev->dev_attrib.emulate_3pc) {
 +              pr_err("EXTENDED_COPY operation explicitly disabled\n");
 +              return TCM_UNSUPPORTED_SCSI_OPCODE;
 +      }
 +
        sa = se_cmd->t_task_cdb[1] & 0x1f;
        if (sa != 0x00) {
                pr_err("EXTENDED_COPY(LID4) not supported\n");
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
  
 +      xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
 +      if (!xop) {
 +              pr_err("Unable to allocate xcopy_op\n");
 +              return TCM_OUT_OF_RESOURCES;
 +      }
 +      xop->xop_se_cmd = se_cmd;
 +
        p = transport_kmap_data_sg(se_cmd);
        if (!p) {
                pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
 +              kfree(xop);
                return TCM_OUT_OF_RESOURCES;
        }
  
        list_id = p[0];
 -      if (list_id != 0x00) {
 -              pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
 -              goto out;
 -      }
 -      list_id_usage = (p[1] & 0x18);
 +      list_id_usage = (p[1] & 0x18) >> 3;
 +
        /*
         * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
         */
                goto out;
        }
  
 -      xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
 -      if (!xop) {
 -              pr_err("Unable to allocate xcopy_op\n");
 -              goto out;
 -      }
 -      xop->xop_se_cmd = se_cmd;
 -
        pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
                " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
                tdll, sdll, inline_dl);
        if (rc <= 0)
                goto out;
  
 +      if (xop->src_dev->dev_attrib.block_size !=
 +          xop->dst_dev->dev_attrib.block_size) {
 +              pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
 +                     " block_size: %u currently unsupported\n",
 +                      xop->src_dev->dev_attrib.block_size,
 +                      xop->dst_dev->dev_attrib.block_size);
 +              xcopy_pt_undepend_remotedev(xop);
 +              ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 +              goto out;
 +      }
 +
        pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
                                rc * XCOPY_TARGET_DESC_LEN);
        seg_desc = &p[16];
@@@ -978,7 -940,7 +961,7 @@@ out
        if (p)
                transport_kunmap_data_sg(se_cmd);
        kfree(xop);
 -      return TCM_INVALID_CDB_FIELD;
 +      return ret;
  }
  
  static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
index eccea1df702df3afc45c692de31b23febfb52c37,9d89905e231864c89f3afca84c824ea5bdd66cf8..6c3d7950d2a9e56d5231938493127c0964f04221
@@@ -472,7 -472,7 +472,7 @@@ static int usbg_bot_setup(struct usb_fu
                bot_enqueue_cmd_cbw(fu);
                return 0;
                break;
 -      };
 +      }
        return -ENOTSUPP;
  }
  
@@@ -617,7 -617,7 +617,7 @@@ static void uasp_status_data_cmpl(struc
  
        default:
                BUG();
 -      };
 +      }
        return;
  
  cleanup:
@@@ -1923,15 -1923,15 +1923,15 @@@ static int usbg_register_configfs(void
        }
  
        fabric->tf_ops = usbg_ops;
-       TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
-       TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
-       TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
+       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
+       fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
        ret = target_fabric_configfs_register(fabric);
        if (ret < 0) {
                printk(KERN_ERR "target_fabric_configfs_register() failed"
diff --combined drivers/vhost/scsi.c
index e663921eebb6f899db67e352077689b6e84d270f,0225c1f5eb397774d1a6ec7d31f2adf82cbc30f4..f175629513ed3dc0f5ad30eb3d8510506a894e1b
@@@ -728,12 -728,7 +728,12 @@@ vhost_scsi_get_tag(struct vhost_virtque
        }
        se_sess = tv_nexus->tvn_se_sess;
  
 -      tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
 +      tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
 +      if (tag < 0) {
 +              pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
 +              return ERR_PTR(-ENOMEM);
 +      }
 +
        cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        pages = cmd->tvc_upages;
@@@ -1056,7 -1051,7 +1056,7 @@@ vhost_scsi_handle_vq(struct vhost_scsi 
                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_num,
 -                                      data_direction == DMA_TO_DEVICE);
 +                                      data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
@@@ -2168,15 -2163,15 +2168,15 @@@ static int tcm_vhost_register_configfs(
        /*
         * Setup default attribute lists for various fabric->tf_cit_tmpl
         */
-       TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
-       TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
-       TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
-       TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
+       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+       fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+       fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
        /*
         * Register the fabric for use within TCM
         */
diff --combined lib/Makefile
index b46065fd67a4741dadeabe1ced0e67ed6269b741,8ed321179849f416fc32849e2192351473fe99d5..a459c31e8c6bb0286237b27bcb65d776bccad1b7
@@@ -13,7 -13,7 +13,7 @@@ lib-y := ctype.o string.o vsprintf.o cm
         sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
         proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-        earlycpio.o percpu-refcount.o percpu_ida.o
+        earlycpio.o
  
  obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
  lib-$(CONFIG_MMU) += ioremap.o
@@@ -26,7 -26,7 +26,7 @@@ obj-y += bcd.o div64.o sort.o parser.o 
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
         gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-        percpu_ida.o
+        percpu-refcount.o percpu_ida.o
  obj-y += string_helpers.o
  obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
  obj-y += kstrtox.o
@@@ -42,12 -42,15 +42,12 @@@ obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_
  obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
  obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
  obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 -obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 -lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 -lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 -lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
  
  CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
  obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
  
  obj-$(CONFIG_BTREE) += btree.o
 +obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
  obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
  obj-$(CONFIG_DEBUG_LIST) += list_debug.o
  obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@@ -154,8 -157,6 +154,8 @@@ obj-$(CONFIG_INTERVAL_TREE_TEST) += int
  
  interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
  
 +obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
 +
  obj-$(CONFIG_ASN1) += asn1_decoder.o
  
  obj-$(CONFIG_FONT_SUPPORT) += fonts/
diff --combined lib/percpu_ida.c
index b0698ea972c65025976e87d6e1a5bb5beac92747,e315903ec63a80fd95581806a1ee01c2c331d58e..9d054bf91d0f3cd278494dedd3114dfd3b292bcb
  #include <linux/spinlock.h>
  #include <linux/percpu_ida.h>
  
 -/*
 - * Number of tags we move between the percpu freelist and the global freelist at
 - * a time
 - */
 -#define IDA_PCPU_BATCH_MOVE   32U
 -
 -/* Max size of percpu freelist, */
 -#define IDA_PCPU_SIZE         ((IDA_PCPU_BATCH_MOVE * 3) / 2)
 -
  struct percpu_ida_cpu {
        /*
         * Even though this is percpu, we need a lock for tag stealing by remote
@@@ -69,7 -78,7 +69,7 @@@ static inline void steal_tags(struct pe
        struct percpu_ida_cpu *remote;
  
        for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
 -           cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
 +           cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
             cpus_have_tags--) {
                cpu = cpumask_next(cpu, &pool->cpus_have_tags);
  
@@@ -114,11 -123,10 +114,10 @@@ static inline void alloc_global_tags(st
  {
        move_tags(tags->freelist, &tags->nr_free,
                  pool->freelist, &pool->nr_free,
 -                min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
 +                min(pool->nr_free, pool->percpu_batch_size));
  }
  
- static inline unsigned alloc_local_tag(struct percpu_ida *pool,
-                                      struct percpu_ida_cpu *tags)
+ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
  {
        int tag = -ENOSPC;
  
@@@ -159,7 -167,7 +158,7 @@@ int percpu_ida_alloc(struct percpu_ida 
        tags = this_cpu_ptr(pool->tag_cpu);
  
        /* Fastpath */
-       tag = alloc_local_tag(pool, tags);
+       tag = alloc_local_tag(tags);
        if (likely(tag >= 0)) {
                local_irq_restore(flags);
                return tag;
@@@ -236,17 -244,17 +235,17 @@@ void percpu_ida_free(struct percpu_ida 
                wake_up(&pool->wait);
        }
  
 -      if (nr_free == IDA_PCPU_SIZE) {
 +      if (nr_free == pool->percpu_max_size) {
                spin_lock(&pool->lock);
  
                /*
                 * Global lock held and irqs disabled, don't need percpu
                 * lock
                 */
 -              if (tags->nr_free == IDA_PCPU_SIZE) {
 +              if (tags->nr_free == pool->percpu_max_size) {
                        move_tags(pool->freelist, &pool->nr_free,
                                  tags->freelist, &tags->nr_free,
 -                                IDA_PCPU_BATCH_MOVE);
 +                                pool->percpu_batch_size);
  
                        wake_up(&pool->wait);
                }
@@@ -283,8 -291,7 +282,8 @@@ EXPORT_SYMBOL_GPL(percpu_ida_destroy)
   * Allocation is percpu, but sharding is limited by nr_tags - for best
   * performance, the workload should not span more cpus than nr_tags / 128.
   */
 -int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
 +int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
 +      unsigned long max_size, unsigned long batch_size)
  {
        unsigned i, cpu, order;
  
        init_waitqueue_head(&pool->wait);
        spin_lock_init(&pool->lock);
        pool->nr_tags = nr_tags;
 +      pool->percpu_max_size = max_size;
 +      pool->percpu_batch_size = batch_size;
  
        /* Guard against overflow */
        if (nr_tags > (unsigned) INT_MAX + 1) {
        pool->nr_free = nr_tags;
  
        pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
 -                                     IDA_PCPU_SIZE * sizeof(unsigned),
 +                                     pool->percpu_max_size * sizeof(unsigned),
                                       sizeof(unsigned));
        if (!pool->tag_cpu)
                goto err;
@@@ -326,65 -331,4 +325,65 @@@ err
        percpu_ida_destroy(pool);
        return -ENOMEM;
  }
 -EXPORT_SYMBOL_GPL(percpu_ida_init);
 +EXPORT_SYMBOL_GPL(__percpu_ida_init);
 +
 +/**
 + * percpu_ida_for_each_free - iterate free ids of a pool
 + * @pool: pool to iterate
 + * @fn: interate callback function
 + * @data: parameter for @fn
 + *
 + * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
 + * ids might be missed, some might be iterated duplicated, and some might
 + * be iterated and not free soon.
 + */
 +int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
 +      void *data)
 +{
 +      unsigned long flags;
 +      struct percpu_ida_cpu *remote;
 +      unsigned cpu, i, err = 0;
 +
 +      local_irq_save(flags);
 +      for_each_possible_cpu(cpu) {
 +              remote = per_cpu_ptr(pool->tag_cpu, cpu);
 +              spin_lock(&remote->lock);
 +              for (i = 0; i < remote->nr_free; i++) {
 +                      err = fn(remote->freelist[i], data);
 +                      if (err)
 +                              break;
 +              }
 +              spin_unlock(&remote->lock);
 +              if (err)
 +                      goto out;
 +      }
 +
 +      spin_lock(&pool->lock);
 +      for (i = 0; i < pool->nr_free; i++) {
 +              err = fn(pool->freelist[i], data);
 +              if (err)
 +                      break;
 +      }
 +      spin_unlock(&pool->lock);
 +out:
 +      local_irq_restore(flags);
 +      return err;
 +}
 +EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
 +
 +/**
 + * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
 + * @pool: pool related
 + * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
 + *
 + * Note: this just returns a snapshot of free tags number.
 + */
 +unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
 +{
 +      struct percpu_ida_cpu *remote;
 +      if (cpu == nr_cpu_ids)
 +              return pool->nr_free;
 +      remote = per_cpu_ptr(pool->tag_cpu, cpu);
 +      return remote->nr_free;
 +}
 +EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
This page took 0.088142 seconds and 5 git commands to generate.