* Safe to call with AFU in a partially allocated/initialized state.
*
* Cancels scheduled worker threads, waits for any active internal AFU
- * commands to timeout and then unmaps the MMIO space.
+ * commands to time out, disables IRQ polling and then unmaps the MMIO space.
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
if (likely(afu)) {
while (atomic_read(&afu->cmds_active))
ssleep(1);
+ if (afu_is_irqpoll_enabled(afu))
+ irq_poll_disable(&afu->irqpoll);
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
global = &afu->afu_map->global;
/* Notify AFU */
- for (i = 0; i < NUM_FC_PORTS; i++) {
+ for (i = 0; i < cfg->num_fc_ports; i++) {
reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
reg |= SISL_FC_SHUTDOWN_NORMAL;
writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
return;
/* Wait up to 1.5 seconds for shutdown processing to complete */
- for (i = 0; i < NUM_FC_PORTS; i++) {
+ for (i = 0; i < cfg->num_fc_ports; i++) {
retry_cnt = 0;
while (true) {
status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
*/
static void afu_err_intr_init(struct afu *afu)
{
+ struct cxlflash_cfg *cfg = afu->parent;
int i;
u64 reg;
writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
/* now clear FC errors */
- for (i = 0; i < NUM_FC_PORTS; i++) {
+ for (i = 0; i < cfg->num_fc_ports; i++) {
writeq_be(0xFFFFFFFFU,
&afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
/**
* process_hrrq() - process the read-response queue
* @afu: AFU associated with the host.
+ * @doneq: Queue of commands harvested from the RRQ.
+ * @budget: Threshold of RRQ entries to process.
+ *
+ * This routine must be called with the RRQ spin lock held (IRQs disabled).
*
* Return: The number of entries processed.
*/
-static int process_hrrq(struct afu *afu)
+static int process_hrrq(struct afu *afu, struct list_head *doneq, int budget)
{
struct afu_cmd *cmd;
struct sisl_ioasa *ioasa;
*hrrq_end = afu->hrrq_end,
*hrrq_curr = afu->hrrq_curr;
- /* Process however many RRQ entries that are ready */
+ /* Process ready RRQ entries up to the specified budget (if any) */
while (true) {
entry = *hrrq_curr;
cmd = container_of(ioarcb, struct afu_cmd, rcb);
}
- cmd_complete(cmd);
+ list_add_tail(&cmd->queue, doneq);
/* Advance to next entry or wrap and flip the toggle bit */
if (hrrq_curr < hrrq_end)
atomic_inc(&afu->hsq_credits);
num_hrrq++;
+
+ if (budget > 0 && num_hrrq >= budget)
+ break;
}
afu->hrrq_curr = hrrq_curr;
return num_hrrq;
}
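
For readers unfamiliar with the toggle-bit convention, the following standalone sketch (hypothetical types and names, not driver code) shows the budgeted ring-consumption pattern that process_hrrq() implements: an entry is ready while its toggle bit matches the consumer's expected value, and the expected value flips on every wrap.

#define RING_TOGGLE 0x1ULL	/* stand-in for the RRQ toggle bit */

struct ring {
	unsigned long long *start, *end, *curr;	/* entry window */
	unsigned long long toggle;		/* expected toggle value */
};

/* Consume ready entries; stop early once budget is reached (budget > 0). */
static int ring_consume(struct ring *r, int budget,
			void (*harvest)(unsigned long long entry))
{
	int num = 0;

	while ((*r->curr & RING_TOGGLE) == r->toggle) {
		harvest(*r->curr);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (r->curr < r->end) {
			r->curr++;
		} else {
			r->curr = r->start;
			r->toggle ^= RING_TOGGLE;
		}

		num++;
		if (budget > 0 && num >= budget)
			break;
	}
	return num;
}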
+/**
+ * process_cmd_doneq() - process a queue of harvested RRQ commands
+ * @doneq: Queue of completed commands.
+ *
+ * Note that upon return the queue can no longer be trusted; completion
+ * processing can free or recycle commands as they are consumed, which is
+ * why the safe list iterator is used below.
+ */
+static void process_cmd_doneq(struct list_head *doneq)
+{
+ struct afu_cmd *cmd, *tmp;
+
+ WARN_ON(list_empty(doneq));
+
+ list_for_each_entry_safe(cmd, tmp, doneq, queue)
+ cmd_complete(cmd);
+}
+
+/**
+ * cxlflash_irqpoll() - poll handler that services the read-response queue
+ * @irqpoll: IRQ poll structure associated with queue to poll.
+ * @budget: Threshold of RRQ entries to process per poll.
+ *
+ * Return: The number of entries processed.
+ */
+static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+ struct afu *afu = container_of(irqpoll, struct afu, irqpoll);
+ unsigned long hrrq_flags;
+ LIST_HEAD(doneq);
+ int num_entries = 0;
+
+ spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);
+
+ num_entries = process_hrrq(afu, &doneq, budget);
+ if (num_entries < budget)
+ irq_poll_complete(irqpoll);
+
+ spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);
+
+ process_cmd_doneq(&doneq);
+ return num_entries;
+}
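
Note the design choice here: the poller completes the harvested commands only after hrrq_slock has been dropped, so cmd_complete(), which can call back into the SCSI midlayer, never runs inside the interrupts-disabled critical section. The interrupt path below follows the same pattern.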
+
/**
* cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
* @irq: Interrupt number.
* @data: Private data provided at interrupt registration, the AFU.
*
- * Return: Always return IRQ_HANDLED.
+ * Return: IRQ_HANDLED, or IRQ_NONE when no ready entries were found.
*/
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
struct afu *afu = (struct afu *)data;
+ unsigned long hrrq_flags;
+ LIST_HEAD(doneq);
+ int num_entries = 0;
+
+ spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);
+
+ if (afu_is_irqpoll_enabled(afu)) {
+ irq_poll_sched(&afu->irqpoll);
+ spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);
+ return IRQ_HANDLED;
+ }
- process_hrrq(afu);
+ num_entries = process_hrrq(afu, &doneq, -1);
+ spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);
+
+ if (num_entries == 0)
+ return IRQ_NONE;
+
+ process_cmd_doneq(&doneq);
return IRQ_HANDLED;
}
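
A condensed sketch of the irq_poll contract that both routines above rely on (kernel context; the my_* names are illustrative and my_consume() is a hypothetical helper): the hard IRQ handler merely schedules the poller, the poll callback drains work in budget-sized slices, and irq_poll_complete() returns the queue to interrupt-driven mode once it drains below the budget.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

static struct irq_poll my_iop;

static int my_consume(int budget);	/* hypothetical: returns entries drained */

static int my_poll(struct irq_poll *iop, int budget)
{
	int done = my_consume(budget);

	if (done < budget)		/* queue drained below budget */
		irq_poll_complete(iop);	/* resume interrupt-driven mode */
	return done;
}

static irqreturn_t my_irq(int irq, void *data)
{
	irq_poll_sched(&my_iop);	/* defer all completion work to the poller */
	return IRQ_HANDLED;
}

/*
 * Setup:    irq_poll_init(&my_iop, weight, my_poll);
 * Teardown: irq_poll_disable(&my_iop);  -- mirrored by stop_afu() above
 */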
/**
* read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host.
- * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
+ * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs.
*
* Return: 0 on success, -errno on failure
*/
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
- char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
+ char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6" };
/* Get the VPD data from the device */
vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
* because the conversion service requires that the ASCII
* string be terminated.
*/
- for (k = 0; k < NUM_FC_PORTS; k++) {
+ for (k = 0; k < cfg->num_fc_ports; k++) {
j = ro_size;
i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
rc = -ENODEV;
goto out;
}
+
+ dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
}
out:
{
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
+ u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
int i = 0, num_ports = 0;
int rc = 0;
u64 reg;
goto out;
}
- dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
- __func__, wwpn[0], wwpn[1]);
-
/* Set up RRQ and SQ in AFU for master issued cmds */
writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
if (afu->internal_lun) {
/* Only use port 0 */
writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = NUM_FC_PORTS - 1;
+ num_ports = 0;
} else {
writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = NUM_FC_PORTS;
+ num_ports = cfg->num_fc_ports;
}
for (i = 0; i < num_ports; i++) {
init_pcr(cfg);
- /* After an AFU reset, RRQ entries are stale, clear them */
+ /* Initialize RRQ */
memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
-
- /* Initialize RRQ pointers */
afu->hrrq_start = &afu->rrq_entry[0];
afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
afu->hrrq_curr = afu->hrrq_start;
afu->toggle = 1;
+ spin_lock_init(&afu->hrrq_slock);
/* Initialize SQ */
if (afu_is_sq_cmd_mode(afu)) {
atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
}
+ /* Initialize IRQ poll */
+ if (afu_is_irqpoll_enabled(afu))
+ irq_poll_init(&afu->irqpoll, afu->irqpoll_weight,
+ cxlflash_irqpoll);
+
rc = init_global(cfg);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
/**
* cxlflash_show_port_status() - queries and presents the current port status
* @port: Desired port for status reporting.
- * @afu: AFU owning the specified port.
+ * @cfg: Internal structure associated with the host.
* @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
*
- * Return: The size of the ASCII string returned in @buf.
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
*/
-static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
+static ssize_t cxlflash_show_port_status(u32 port,
+ struct cxlflash_cfg *cfg,
+ char *buf)
{
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
char *disp_status;
u64 status;
__be64 __iomem *fc_regs;
- if (port >= NUM_FC_PORTS)
- return 0;
+ WARN_ON(port >= MAX_FC_PORTS);
+
+ if (port >= cfg->num_fc_ports) {
+ dev_info(dev, "%s: Port %d not supported on this card.\n",
+ __func__, port);
+ return -EINVAL;
+ }
fc_regs = &afu->afu_map->global.fc_regs[port][0];
status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- return cxlflash_show_port_status(0, afu, buf);
+ return cxlflash_show_port_status(0, cfg, buf);
}
/**
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- return cxlflash_show_port_status(1, afu, buf);
+ return cxlflash_show_port_status(1, cfg, buf);
}
/**
/*
* When configured for internal LUN, there is only one channel,
- * channel number 0, else there will be 2 (default).
+ * channel number 0; otherwise the maximum channel number is one
+ * less than the number of FC ports on the card.
*/
if (afu->internal_lun)
shost->max_channel = 0;
else
- shost->max_channel = NUM_FC_PORTS - 1;
+ shost->max_channel = cfg->num_fc_ports - 1;
afu_reset(cfg);
scsi_scan_host(cfg->host);
/**
* cxlflash_show_port_lun_table() - queries and presents the port LUN table
* @port: Desired port for status reporting.
- * @afu: AFU owning the specified port.
+ * @cfg: Internal structure associated with the host.
* @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
*
- * Return: The size of the ASCII string returned in @buf.
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
*/
static ssize_t cxlflash_show_port_lun_table(u32 port,
- struct afu *afu,
+ struct cxlflash_cfg *cfg,
char *buf)
{
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
int i;
ssize_t bytes = 0;
__be64 __iomem *fc_port;
- if (port >= NUM_FC_PORTS)
- return 0;
+ WARN_ON(port >= MAX_FC_PORTS);
+
+ if (port >= cfg->num_fc_ports) {
+ dev_info(dev, "%s: Port %d not supported on this card.\n",
+ __func__, port);
+ return -EINVAL;
+ }
fc_port = &afu->afu_map->global.fc_port[port][0];
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- return cxlflash_show_port_lun_table(0, afu, buf);
+ return cxlflash_show_port_lun_table(0, cfg, buf);
}
/**
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+
+ return cxlflash_show_port_lun_table(1, cfg, buf);
+}
+
+/**
+ * irqpoll_weight_show() - presents the current IRQ poll weight for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the IRQ poll weight.
+ * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
+ * weight in ASCII.
+ *
+ * An IRQ poll weight of 0 indicates polling is disabled.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t irqpoll_weight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
struct afu *afu = cfg->afu;
- return cxlflash_show_port_lun_table(1, afu, buf);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
+}
+
+/**
+ * irqpoll_weight_store() - sets the current IRQ poll weight for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the IRQ poll weight.
+ * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
+ * weight in ASCII.
+ * @count: Length of data residing in @buf.
+ *
+ * An IRQ poll weight of 0 indicates polling is disabled.
+ *
+ * Return: @count on success, -EINVAL on failure.
+ */
+static ssize_t irqpoll_weight_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+ struct device *cfgdev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ u32 weight;
+ int rc;
+
+ rc = kstrtouint(buf, 10, &weight);
+ if (rc)
+ return -EINVAL;
+
+ if (weight > 256) {
+ dev_info(cfgdev,
+ "Invalid IRQ poll weight. It must be 256 or less.\n");
+ return -EINVAL;
+ }
+
+ if (weight == afu->irqpoll_weight) {
+ dev_info(cfgdev,
+ "Current IRQ poll weight has the same weight.\n");
+ return -EINVAL;
+ }
+
+ if (afu_is_irqpoll_enabled(afu))
+ irq_poll_disable(&afu->irqpoll);
+
+ afu->irqpoll_weight = weight;
+
+ if (weight > 0)
+ irq_poll_init(&afu->irqpoll, weight, cxlflash_irqpoll);
+
+ return count;
}
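
For completeness, a minimal userspace sketch of exercising the new attribute (the host0 path is an assumption; substitute the SCSI host backed by the cxlflash adapter):

#include <stdio.h>

int main(void)
{
	/* host0 is hypothetical; locate the cxlflash-backed SCSI host */
	FILE *f = fopen("/sys/class/scsi_host/host0/irqpoll_weight", "w");

	if (!f)
		return 1;
	fprintf(f, "64\n");	/* 0 disables polling; values above 256 are rejected */
	return fclose(f) ? 1 : 0;
}

Per the store routine above, writing the current weight or a value above 256 fails with -EINVAL.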
/**
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
+static DEVICE_ATTR_RW(irqpoll_weight);
static struct device_attribute *cxlflash_host_attrs[] = {
&dev_attr_port0,
&dev_attr_ioctl_version,
&dev_attr_port0_lun_table,
&dev_attr_port1_lun_table,
+ &dev_attr_irqpoll_weight,
NULL
};
struct device *dev = &pdev->dev;
struct dev_dependent_vals *ddv;
int rc = 0;
+ int k;
dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
__func__, pdev->irq);
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
+ cfg->num_fc_ports = NUM_FC_PORTS;
cfg->cxl_fops = cxlflash_cxl_fops;
/*
- * The promoted LUNs move to the top of the LUN table. The rest stay
- * on the bottom half. The bottom half grows from the end
- * (index = 255), whereas the top half grows from the beginning
- * (index = 0).
+ * Promoted LUNs move to the top of the LUN table. The rest stay on
+ * the bottom half. The bottom half grows from the end (index = 255),
+ * whereas the top half grows from the beginning (index = 0).
+ *
+ * Initialize the last LUN index for all possible ports.
*/
- cfg->promote_lun_index = 0;
- cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
- cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
+ cfg->promote_lun_index = 0;
+
+ for (k = 0; k < MAX_FC_PORTS; k++)
+ cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
cfg->dev_id = (struct pci_device_id *)dev_id;
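
To illustrate the two-ended scheme described in the comment above, a hypothetical allocator (not driver code; bounds and collision checks omitted) would hand out promoted indices from the top of the table and per-port indices from the bottom, the two regions growing toward each other:

/* Hypothetical sketch only; alloc_lun_index() is not part of the driver. */
static int alloc_lun_index(struct cxlflash_cfg *cfg, bool promote, u32 port)
{
	if (promote)
		return cfg->promote_lun_index++;	/* grows 0, 1, 2, ... */
	return cfg->last_lun_index[port]--;		/* grows 255, 254, ... */
}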