/* create user entries for this device */
tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
- if (tmp)
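+ /* don't create a sysfs link from a device to itself */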
+ if (tmp && (tmp != i2o_dev))
sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
"user");
/* create user entries referring to this device */
list_for_each_entry(tmp, &c->devices, list)
- if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev))
sysfs_create_link(&tmp->device.kobj,
&i2o_dev->device.kobj, "user");
/* create parent entries for this device */
tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
- if (tmp)
+ if (tmp && (tmp != i2o_dev))
sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
"parent");
/* create parent entries referring to this device */
list_for_each_entry(tmp, &c->devices, list)
- if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev))
sysfs_create_link(&tmp->device.kobj,
&i2o_dev->device.kobj, "parent");
#include <linux/rwsem.h>
#include <linux/i2o.h>
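+/* OSM_NAME is the prefix used by the osm_*() logging helpers below */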
+#define OSM_NAME "core"
+
/* max_drivers - Maximum I2O drivers (OSMs) which could be registered */
unsigned int i2o_max_drivers = I2O_MAX_DRIVERS;
module_param_named(max_drivers, i2o_max_drivers, uint, 0);
struct i2o_driver *drv;
u32 context = readl(&msg->u.s.icntxt);
- if (likely(context < i2o_max_drivers)) {
- spin_lock(&i2o_drivers_lock);
- drv = i2o_drivers[context];
- spin_unlock(&i2o_drivers_lock);
-
- if (unlikely(!drv)) {
- printk(KERN_WARNING "%s: Spurious reply to unknown "
- "driver %d\n", c->name, context);
- return -EIO;
- }
+ if (unlikely(context >= i2o_max_drivers)) {
+ printk(KERN_WARNING "%s: Spurious reply to unknown driver "
+ "%d\n", c->name, readl(&msg->u.s.icntxt));
+ return -EIO;
+ }
- if ((readl(&msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) {
- struct i2o_device *dev, *tmp;
- struct i2o_event *evt;
- u16 size;
- u16 tid;
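+ /* i2o_drivers[] is protected by i2o_drivers_lock */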
+ spin_lock(&i2o_drivers_lock);
+ drv = i2o_drivers[context];
+ spin_unlock(&i2o_drivers_lock);
- tid = readl(&msg->u.head[1]) & 0x1fff;
+ if (unlikely(!drv)) {
+ osm_warn("Spurious reply to unknown driver %d\n", context);
+ return -EIO;
+ }
- pr_debug("%s: event received from device %d\n", c->name,
- tid);
+ if ((readl(&msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) {
+ struct i2o_device *dev, *tmp;
+ struct i2o_event *evt;
+ u16 size;
+ u16 tid = readl(&msg->u.head[1]) & 0xfff;
- /* cut of header from message size (in 32-bit words) */
- size = (readl(&msg->u.head[0]) >> 16) - 5;
+ osm_debug("event received from device %d\n", tid);
- evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
- if (!evt)
- return -ENOMEM;
- memset(evt, 0, size * 4 + sizeof(*evt));
+ /* cut off the header from the message size (in 32-bit words) */
+ size = (readl(&msg->u.head[0]) >> 16) - 5;
- evt->size = size;
- memcpy_fromio(&evt->tcntxt, &msg->u.s.tcntxt,
- (size + 2) * 4);
+ evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO);
+ if (!evt)
+ return -ENOMEM;
- list_for_each_entry_safe(dev, tmp, &c->devices, list)
- if (dev->lct_data.tid == tid) {
- evt->i2o_dev = dev;
- break;
- }
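+ /* copy the event data into our own buffer; it is handled later from a workqueue */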
+ evt->size = size;
+ evt->tcntxt = readl(&msg->u.s.tcntxt);
+ evt->event_indicator = readl(&msg->body[0]);
+ memcpy_fromio(&evt->tcntxt, &msg->u.s.tcntxt, size * 4);
- INIT_WORK(&evt->work, (void (*)(void *))drv->event,
- evt);
- queue_work(drv->event_queue, &evt->work);
- return 1;
+ list_for_each_entry_safe(dev, tmp, &c->devices, list)
+ if (dev->lct_data.tid == tid) {
+ evt->i2o_dev = dev;
+ break;
}
- if (likely(drv->reply))
- return drv->reply(c, m, msg);
- else
- pr_debug("%s: Reply to driver %s, but no reply function"
- " defined!\n", c->name, drv->name);
+ INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+ queue_work(drv->event_queue, &evt->work);
+ return 1;
+ }
+
+ if (unlikely(!drv->reply)) {
+ pr_debug("%s: Reply to driver %s, but no reply function"
+ " defined!\n", c->name, drv->name);
return -EIO;
- } else
- printk(KERN_WARNING "%s: Spurious reply to unknown driver "
- "%d\n", c->name, readl(&msg->u.s.icntxt));
- return -EIO;
+ }
+
+ return drv->reply(c, m, msg);
}
/**
struct i2o_message __iomem *msg)
{
struct i2o_exec_wait *wait, *tmp;
- static spinlock_t lock;
+ static spinlock_t lock = SPIN_LOCK_UNLOCKED;
int rc = 1;
u32 context;
- spin_lock_init(&lock);
-
context = readl(&msg->u.s.tcntxt);
/*
*/
static void i2o_exec_event(struct i2o_event *evt)
{
- osm_info("Event received from device: %d\n",
- evt->i2o_dev->lct_data.tid);
+ if (likely(evt->i2o_dev))
+ osm_info("Event received from device: %d\n",
+ evt->i2o_dev->lct_data.tid);
kfree(evt);
};
u32 sg_offset = 0;
u32 sg_count = 0;
u32 i = 0;
+ u32 sg_index = 0; /* number of SG buffers allocated so far */
i2o_status_block *sb;
struct i2o_message *msg;
u32 m;
if (sg_count > SG_TABLESIZE) {
printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
c->name, sg_count);
- kfree(reply);
- return -EINVAL;
+ rcode = -EINVAL;
+ goto cleanup;
}
for (i = 0; i < sg_count; i++) {
goto cleanup;
}
sg_size = sg[i].flag_count & 0xffffff;
- p = &(sg_list[i]);
+ p = &(sg_list[sg_index++]);
/* Allocate memory for the transfer */
if (i2o_dma_alloc
(&c->pdev->dev, p, sg_size,
"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
c->name, sg_size, i, sg_count);
rcode = -ENOMEM;
- goto cleanup;
+ goto sg_list_cleanup;
}
/* Copy in the user's SG buffer if necessary */
if (sg[i].
"%s: Could not copy SG buf %d FROM user\n",
c->name, i);
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
}
//TODO 64bit fix
rcode = i2o_msg_post_wait(c, m, 60);
if (rcode)
- goto cleanup;
+ goto sg_list_cleanup;
if (sg_offset) {
- u32 msg[128];
+ u32 msg[MSG_FRAME_SIZE];
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
// TODO 64bit fix
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size)) {
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
sg_count =
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
c->name, sg_list[j].virt,
sg[j].addr_bus);
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
}
}
"%s: Could not copy message context FROM user\n",
c->name);
rcode = -EFAULT;
+ goto sg_list_cleanup;
}
if (copy_to_user(user_reply, reply, reply_size)) {
printk(KERN_WARNING
}
}
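+ /* free only the SG buffers that were actually allocated */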
+ sg_list_cleanup:
+ for (i = 0; i < sg_index; i++)
+ i2o_dma_free(&c->pdev->dev, &sg_list[i]);
+
cleanup:
kfree(reply);
return rcode;
if (sg_count > SG_TABLESIZE) {
printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
c->name, sg_count);
- kfree(reply);
- return -EINVAL;
+ rcode = -EINVAL;
+ goto cleanup;
}
for (i = 0; i < sg_count; i++) {
"%s:Bad SG element %d - not simple (%x)\n",
c->name, i, sg[i].flag_count);
rcode = -EINVAL;
- goto cleanup;
+ goto sg_list_cleanup;
}
sg_size = sg[i].flag_count & 0xffffff;
/* Allocate memory for the transfer */
"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
c->name, sg_size, i, sg_count);
rcode = -ENOMEM;
- goto cleanup;
+ goto sg_list_cleanup;
}
sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
/* Copy in the user's SG buffer if necessary */
"%s: Could not copy SG buf %d FROM user\n",
c->name, i);
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
}
//TODO 64bit fix
rcode = i2o_msg_post_wait(c, m, 60);
if (rcode)
- goto cleanup;
+ goto sg_list_cleanup;
if (sg_offset) {
u32 msg[128];
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size)) {
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
sg_count =
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
c->name, sg_list[j],
sg[j].addr_bus);
rcode = -EFAULT;
- goto cleanup;
+ goto sg_list_cleanup;
}
}
}
}
}
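+ /* free only the SG buffers that were actually allocated */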
+ sg_list_cleanup:
+ for (i = 0; i < sg_index; i++)
+ kfree(sg_list[i]);
+
cleanup:
kfree(reply);
return rcode;
list_for_each_entry(i2o_dev, &c->devices, list)
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) {
- if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* SCSI bus */
+ if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+ && (type == 0x01)) /* SCSI bus */
max_channel++;
}
#include <linux/interrupt.h>
#include <linux/i2o.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif // CONFIG_MTRR
-
/* Module internal functions from other sources */
extern struct i2o_controller *i2o_iop_alloc(void);
extern void i2o_iop_free(struct i2o_controller *);
static struct pci_device_id __devinitdata i2o_pci_ids[] = {
{PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
{PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
+ {.vendor = PCI_VENDOR_ID_INTEL,.device = 0x1962,
+ .subvendor = PCI_VENDOR_ID_PROMISE,.subdevice = PCI_ANY_ID},
{0}
};
i2o_dma_free(dev, &c->hrt);
i2o_dma_free(dev, &c->status);
-#ifdef CONFIG_MTRR
- if (c->mtrr_reg0 >= 0)
- mtrr_del(c->mtrr_reg0, 0, 0);
- if (c->mtrr_reg1 >= 0)
- mtrr_del(c->mtrr_reg1, 0, 0);
-#endif
-
if (c->raptor && c->in_queue.virt)
iounmap(c->in_queue.virt);
c->name, (unsigned long)c->base.phys,
(unsigned long)c->base.len);
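+ /* map the controller registers uncached */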
- c->base.virt = ioremap(c->base.phys, c->base.len);
+ c->base.virt = ioremap_nocache(c->base.phys, c->base.len);
if (!c->base.virt) {
printk(KERN_ERR "%s: Unable to map controller.\n", c->name);
return -ENOMEM;
}
if (c->raptor) {
- c->in_queue.virt = ioremap(c->in_queue.phys, c->in_queue.len);
+ c->in_queue.virt =
+ ioremap_nocache(c->in_queue.phys, c->in_queue.len);
if (!c->in_queue.virt) {
printk(KERN_ERR "%s: Unable to map controller.\n",
c->name);
c->post_port = c->base.virt + 0x40;
c->reply_port = c->base.virt + 0x44;
-#ifdef CONFIG_MTRR
- /* Enable Write Combining MTRR for IOP's memory region */
- c->mtrr_reg0 = mtrr_add(c->in_queue.phys, c->in_queue.len,
- MTRR_TYPE_WRCOMB, 1);
- c->mtrr_reg1 = -1;
-
- if (c->mtrr_reg0 < 0)
- printk(KERN_WARNING "%s: could not enable write combining "
- "MTRR\n", c->name);
- else
- printk(KERN_INFO "%s: using write combining MTRR\n", c->name);
-
- /*
- * If it is an INTEL i960 I/O processor then set the first 64K to
- * Uncacheable since the region contains the messaging unit which
- * shouldn't be cached.
- */
- if ((pdev->vendor == PCI_VENDOR_ID_INTEL ||
- pdev->vendor == PCI_VENDOR_ID_DPT) && !c->raptor) {
- printk(KERN_INFO "%s: MTRR workaround for Intel i960 processor"
- "\n", c->name);
- c->mtrr_reg1 = mtrr_add(c->base.phys, 0x10000,
- MTRR_TYPE_UNCACHABLE, 1);
-
- if (c->mtrr_reg1 < 0) {
- printk(KERN_WARNING "%s: Error in setting "
- "MTRR_TYPE_UNCACHABLE\n", c->name);
- mtrr_del(c->mtrr_reg0, c->in_queue.phys,
- c->in_queue.len);
- c->mtrr_reg0 = -1;
- }
- }
-#endif
-
if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
i2o_pci_free(c);
return -ENOMEM;
{
struct i2o_controller *c;
int rc;
+ struct pci_dev *i960 = NULL;
printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
if ((pdev->class & 0xff) > 1) {
- printk(KERN_WARNING "i2o: I2O controller found but does not "
- "support I2O 1.5 (skipping).\n");
+ printk(KERN_WARNING "i2o: %s does not support I2O 1.5 "
+ "(skipping).\n", pci_name(pdev));
return -ENODEV;
}
if ((rc = pci_enable_device(pdev))) {
- printk(KERN_WARNING "i2o: I2O controller found but could not be"
- " enabled.\n");
+ printk(KERN_WARNING "i2o: couldn't enable device %s\n",
+ pci_name(pdev));
return rc;
}
- printk(KERN_INFO "i2o: I2O controller found on bus %d at %d.\n",
- pdev->bus->number, pdev->devfn);
-
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
- printk(KERN_WARNING "i2o: I2O controller on bus %d at %d: No "
- "suitable DMA available!\n", pdev->bus->number,
- pdev->devfn);
+ printk(KERN_WARNING "i2o: no suitable DMA found for %s\n",
+ pci_name(pdev));
rc = -ENODEV;
goto disable;
}
c = i2o_iop_alloc();
if (IS_ERR(c)) {
- printk(KERN_ERR "i2o: memory for I2O controller could not be "
- "allocated\n");
+ printk(KERN_ERR "i2o: couldn't allocate memory for %s\n",
+ pci_name(pdev));
rc = PTR_ERR(c);
goto disable;
- }
+ } else
+ printk(KERN_INFO "%s: controller found (%s)\n", c->name,
+ pci_name(pdev));
c->pdev = pdev;
c->device = pdev->dev;
}
if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) {
+ /*
+ * Expose the chip behind the i960 for initialization, otherwise it
+ * will fail.
+ */
+ i960 =
+ pci_find_slot(c->pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
+
+ if (i960)
+ pci_write_config_word(i960, 0x42, 0);
+
c->promise = 1;
- printk(KERN_INFO "%s: Promise workarounds activated.\n",
- c->name);
}
/* Cards that go bananas if you quiesce them before you reset them. */
if ((rc = i2o_iop_add(c)))
goto uninstall;
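+ /* presumably hides the chip behind the i960 again now that the IOP is up */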
+ if (i960)
+ pci_write_config_word(i960, 0x42, 0x03ff);
+
return 0;
uninstall:
#define MAX_I2O_CONTROLLERS 32
//#include <linux/ioctl.h>
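+/* user space has no u8/u16/u32 typedefs, so provide them for the ioctl structures below */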
+#ifndef __KERNEL__
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+
+#endif /* !__KERNEL__ */
+
/*
* I2O Control IOCTLs and structures
#define I2O_BUS_CARDBUS 7
#define I2O_BUS_UNKNOWN 0x80
-#ifndef __KERNEL__
-
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned int u32;
-
-#endif /* __KERNEL__ */
-
typedef struct _i2o_pci_bus {
u8 PciFunctionNumber;
u8 PciDeviceNumber;
unsigned int raptor:1; /* split bar */
unsigned int promise:1; /* Promise controller */
-#ifdef CONFIG_MTRR
- int mtrr_reg0;
- int mtrr_reg1;
-#endif
-
struct list_head devices; /* list of I2O devices */
struct notifier_block *event_notifer; /* Events */