sfc: Added and removed braces to comply with kernel style
author     Ben Hutchings <bhutchings@solarflare.com>
           Fri, 16 May 2008 20:15:49 +0000 (21:15 +0100)
committer  Jeff Garzik <jgarzik@redhat.com>
           Thu, 22 May 2008 09:59:26 +0000 (05:59 -0400)
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
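
For reference, the two Documentation/CodingStyle rules this patch applies, shown as a minimal standalone sketch (hypothetical struct and loops, nothing from the sfc driver): a body consisting of one simple statement takes no braces, while a body that is itself a multi-line statement keeps them.

struct item {                           /* hypothetical type, illustration only */
        struct item *next;
        int irq;
};

static void style_example(struct item *head)
{
        struct item *p;

        /* A body that is one simple statement takes no braces
         * (like the efx_for_each_rx_queue() change in efx.c below). */
        for (p = head; p; p = p->next)
                p->irq = 0;

        /* A body that is itself a multi-line statement keeps its braces,
         * even though it is syntactically a single statement
         * (like the efx_for_each_channel_with_interrupt() changes below). */
        for (p = head; p; p = p->next) {
                if (p->irq < 0)
                        p->irq = 0;
        }
}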
drivers/net/sfc/efx.c
drivers/net/sfc/falcon.c
drivers/net/sfc/rx.c
drivers/net/sfc/tx.c

diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 3494f4cd314e711bbf076040d8dda28d1fbba388..df19e86ab2e71caa3a08e3982f48f7b4b1f4587e 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1060,9 +1060,8 @@ static void efx_flush_all(struct efx_nic *efx)
        cancel_delayed_work_sync(&efx->monitor_work);
 
        /* Ensure that all RX slow refills are complete. */
-       efx_for_each_rx_queue(rx_queue, efx) {
+       efx_for_each_rx_queue(rx_queue, efx)
                cancel_delayed_work_sync(&rx_queue->work);
-       }
 
        /* Stop scheduled port reconfigurations */
        cancel_work_sync(&efx->reconfigure_work);
@@ -1088,9 +1087,10 @@ static void efx_stop_all(struct efx_nic *efx)
        falcon_disable_interrupts(efx);
        if (efx->legacy_irq)
                synchronize_irq(efx->legacy_irq);
-       efx_for_each_channel_with_interrupt(channel, efx)
+       efx_for_each_channel_with_interrupt(channel, efx) {
                if (channel->irq)
                        synchronize_irq(channel->irq);
+       }
 
        /* Stop all NAPI processing and synchronous rx refills */
        efx_for_each_channel(channel, efx)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c58f8a3443cc24b84e78a2b0dc3b61f797216f59..4f96ce4c3532a845d636fdcb6c4fd38bb79ad749 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1636,9 +1636,10 @@ void falcon_fini_interrupt(struct efx_nic *efx)
        efx_oword_t reg;
 
        /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel_with_interrupt(channel, efx)
+       efx_for_each_channel_with_interrupt(channel, efx) {
                if (channel->irq)
                        free_irq(channel->irq, channel);
+       }
 
        /* ACK legacy interrupt */
        if (FALCON_REV(efx) >= FALCON_REV_B0)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 670622373ddf027b1456ad17b69132331332396f..a6413309c5772e7bf9931b8e0cf13a2e86b55e1a 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -400,9 +400,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
                return 0;
 
        /* Record minimum fill level */
-       if (unlikely(fill_level < rx_queue->min_fill))
+       if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
+       }
 
        /* Acquire RX add lock.  If this lock is contended, then a fast
         * fill must already be in progress (e.g. in the refill
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 9b436f5b48889148792134f397830ea4716a17fb..75eb0fd5fd2b7e97b0a79e2621eba72b784b7de4 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
        base_dma = tsoh->dma_addr & PAGE_MASK;
 
        p = &tx_queue->tso_headers_free;
-       while (*p != NULL)
+       while (*p != NULL) {
                if (((unsigned long)*p & PAGE_MASK) == base_kva)
                        *p = (*p)->next;
                else
                        p = &(*p)->next;
+       }
 
        pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 
        /* Allocate a DMA-mapped header buffer. */
        if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
-               if (tx_queue->tso_headers_free == NULL)
+               if (tx_queue->tso_headers_free == NULL) {
                        if (efx_tsoh_block_alloc(tx_queue))
                                return -1;
+               }
                EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
                tsoh = tx_queue->tso_headers_free;
                tx_queue->tso_headers_free = tsoh->next;
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 {
        unsigned i;
 
-       if (tx_queue->buffer)
+       if (tx_queue->buffer) {
                for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
                        efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+       }
 
        while (tx_queue->tso_headers_free != NULL)
                efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,