Pointed out by Pete Zaitcev.
Signed-off-by: Kristian Høgsberg <krh@redhat.com>
Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
enum dma_data_direction direction)
{
struct page *page;
- int i;
+ int i, j;
void *p;
ctx->buffer_size = PAGE_ALIGN(size);
ctx->buffer = vmalloc_32_user(ctx->buffer_size);
if (ctx->buffer == NULL)
- return -ENOMEM;
+ goto fail_buffer_alloc;
ctx->page_count = ctx->buffer_size >> PAGE_SHIFT;
ctx->pages =
kzalloc(ctx->page_count * sizeof(ctx->pages[0]), GFP_KERNEL);
- if (ctx->pages == NULL) {
- vfree(ctx->buffer);
- return -ENOMEM;
- }
+ if (ctx->pages == NULL)
+ goto fail_pages_alloc;
p = ctx->buffer;
for (i = 0; i < ctx->page_count; i++, p += PAGE_SIZE) {
page = vmalloc_to_page(p);
ctx->pages[i] = dma_map_page(ctx->card->device,
page, 0, PAGE_SIZE, direction);
+ if (dma_mapping_error(ctx->pages[i]))
+ goto fail_mapping;
}
return 0;
+
+ fail_mapping:
+ /* Unmap with the same direction used for dma_map_page() above;
+  * a hard-coded DMA_TO_DEVICE would be wrong for receive contexts. */
+ for (j = 0; j < i; j++)
+ dma_unmap_page(ctx->card->device, ctx->pages[j],
+ PAGE_SIZE, direction);
+ /* ctx->pages was successfully allocated on this path; free it
+  * before falling through to the buffer cleanup. */
+ kfree(ctx->pages);
+ fail_pages_alloc:
+ vfree(ctx->buffer);
+ fail_buffer_alloc:
+ return -ENOMEM;
}
static void destroy_iso_buffer(struct fw_iso_context *ctx)
packet->payload,
packet->payload_length,
DMA_TO_DEVICE);
- if (packet->payload_bus == 0) {
+ if (dma_mapping_error(packet->payload_bus)) {
complete_transmission(packet, RCODE_SEND_ERROR, list);
return;
}
ctx->descriptor_bus =
dma_map_single(ohci->card.device, &ctx->d,
sizeof ctx->d, DMA_TO_DEVICE);
- if (ctx->descriptor_bus == 0)
+ if (dma_mapping_error(ctx->descriptor_bus))
return -ENOMEM;
ctx->regs = regs;
tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx);
ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
- if (ctx->buffer == NULL) {
- spin_lock_irqsave(&ohci->lock, flags);
- *mask |= 1 << index;
- spin_unlock_irqrestore(&ohci->lock, flags);
- return ERR_PTR(-ENOMEM);
- }
+ if (ctx->buffer == NULL)
+ goto buffer_alloc_failed;
ctx->buffer_bus =
dma_map_single(card->device, ctx->buffer,
ISO_BUFFER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->buffer_bus))
+ goto buffer_map_failed;
ctx->head_descriptor = ctx->buffer;
ctx->prev_descriptor = ctx->buffer;
ctx->head_descriptor++;
return &ctx->base;
+
+ buffer_map_failed:
+ kfree(ctx->buffer);
+ buffer_alloc_failed:
+ spin_lock_irqsave(&ohci->lock, flags);
+ *mask |= 1 << index;
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return ERR_PTR(-ENOMEM);
}
static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof orb->request, DMA_TO_DEVICE);
- if (orb->base.request_bus == 0)
+ if (dma_mapping_error(orb->base.request_bus))
goto out;
orb->response_bus =
dma_map_single(device->card->device, &orb->response,
sizeof orb->response, DMA_FROM_DEVICE);
- if (orb->response_bus == 0)
+ if (dma_mapping_error(orb->response_bus))
goto out;
orb->request.response.high = 0;
* transfer direction not handled. */
if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
- cmd->result = DID_ERROR << 16;
- done(cmd);
- return 0;
+ goto fail_alloc;
}
orb = kzalloc(sizeof *orb, GFP_ATOMIC);
if (orb == NULL) {
fw_notify("failed to alloc orb\n");
- cmd->result = DID_NO_CONNECT << 16;
- done(cmd);
- return 0;
+ goto fail_alloc;
}
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof orb->request, DMA_TO_DEVICE);
+ if (dma_mapping_error(orb->base.request_bus))
+ goto fail_mapping;
orb->unit = unit;
orb->done = done;
* could we get the scsi or blk layer to do that by
* reporting our max supported block size? */
fw_error("command > 64k\n");
- cmd->result = DID_ERROR << 16;
- done(cmd);
- return 0;
+ goto fail_bufflen;
} else if (cmd->request_bufflen > 0) {
sbp2_command_orb_map_buffer(orb);
}
sd->command_block_agent_address + SBP2_ORB_POINTER);
return 0;
+
+ fail_bufflen:
+ dma_unmap_single(device->card->device, orb->base.request_bus,
+ sizeof orb->request, DMA_TO_DEVICE);
+ fail_mapping:
+ kfree(orb);
+ fail_alloc:
+ cmd->result = DID_ERROR << 16;
+ done(cmd);
+ return 0;
}
static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)