#define WATCH_INTERVAL 1 /* number of seconds */
+/* Work events. */
+enum qla_work_type {
+	QLA_EVT_AEN,
+};
+
+struct qla_work_evt {
+	struct list_head list;
+	enum qla_work_type type;
+	u32 flags;
+#define QLA_EVT_FLAG_FREE	0x1
+
+	union {
+		struct {
+			enum fc_host_event_code code;
+			u32 data;
+		} aen;
+	} u;
+};
+
/*
 * Linux Host Adapter structure
 */
	uint32_t login_retry_count;
	int max_q_depth;
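+	/* Work queued from interrupt context, run by the DPC thread. */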
+	struct list_head work_list;
+
	/* Fibre Channel Device List. */
	struct list_head fcports;
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
		ha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
		break;
	case MBA_LOOP_UP:		/* Loop Up Event */
		    link_speed);
		ha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;
	case MBA_LOOP_DOWN:		/* Loop Down Event */
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+		qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
		break;
	case MBA_LIP_RESET:		/* LIP reset occurred */
		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
		break;
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
+		qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
		break;
	/* case MBA_RIO_RESPONSE: */
	INIT_LIST_HEAD(&ha->list);
	INIT_LIST_HEAD(&ha->fcports);
	INIT_LIST_HEAD(&ha->vp_list);
+	INIT_LIST_HEAD(&ha->work_list);
	set_bit(0, (unsigned long *) ha->vp_idx_map);
	kfree(ha->nvram);
}
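+
+/*
+ * qla2x00_alloc_work() - allocate a work event of the given type.
+ * A non-zero @locked means the caller is in atomic context (hardware
+ * lock held), so the allocation uses GFP_ATOMIC.
+ */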
+struct qla_work_evt *
+qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
+    int locked)
+{
+	struct qla_work_evt *e;
+
+	e = kzalloc(sizeof(struct qla_work_evt),
+	    locked ? GFP_ATOMIC : GFP_KERNEL);
+	if (!e)
+		return NULL;
+
+	INIT_LIST_HEAD(&e->list);
+	e->type = type;
+	e->flags = QLA_EVT_FLAG_FREE;
+	return e;
+}
+
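+/*
+ * qla2x00_post_work() - add an event to ha->work_list and wake the DPC
+ * thread.  The hardware lock protects the list; it is taken here unless
+ * @locked indicates the caller already holds it.
+ */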
+int
+qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
+{
+	unsigned long flags;
+
+	if (!locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	list_add_tail(&e->list, &ha->work_list);
+	qla2xxx_wake_dpc(ha);
+	if (!locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+}
+
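+/*
+ * qla2x00_post_aen_work() - defer an FC transport async event (AEN).
+ * Safe to call from the interrupt handler with the hardware lock held:
+ * the event is allocated with GFP_ATOMIC and queued without retaking
+ * the lock; fc_host_post_event() then runs from the DPC thread.
+ */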
+int
+qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code,
+    u32 data)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.aen.code = code;
+	e->u.aen.data = data;
+	return qla2x00_post_work(ha, e, 1);
+}
+
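+/*
+ * qla2x00_do_work() - drain the work list from the DPC thread.
+ * The hardware lock is dropped while each event is handled; events
+ * marked QLA_EVT_FLAG_FREE are freed afterwards.  A new event type
+ * adds a qla_work_type enumerator, a union member, and a case here.
+ */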
+static void
+qla2x00_do_work(struct scsi_qla_host *ha)
+{
+	struct qla_work_evt *e;
+
+	spin_lock_irq(&ha->hardware_lock);
+	while (!list_empty(&ha->work_list)) {
+		e = list_entry(ha->work_list.next, struct qla_work_evt, list);
+		list_del_init(&e->list);
+		spin_unlock_irq(&ha->hardware_lock);
+
+		switch (e->type) {
+		case QLA_EVT_AEN:
+			fc_host_post_event(ha->host, fc_get_event_number(),
+			    e->u.aen.code, e->u.aen.data);
+			break;
+		}
+		if (e->flags & QLA_EVT_FLAG_FREE)
+			kfree(e);
+		spin_lock_irq(&ha->hardware_lock);
+	}
+	spin_unlock_irq(&ha->hardware_lock);
+}
+
/**************************************************************************
* qla2x00_do_dpc
* This kernel thread is a task that is scheduled by the interrupt handler
			continue;
		}
+		qla2x00_do_work(ha);
+
		if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
			DEBUG(printk("scsi(%ld): dpc: sched "