git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes
authorLinus Torvalds <torvalds@linux-foundation.org>
Mon, 23 May 2011 15:24:09 +0000 (08:24 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 23 May 2011 15:24:09 +0000 (08:24 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes:
  GFS2: Wait properly when flushing the ail list
  GFS2: Wipe directory hash table metadata when deallocating a directory

187 files changed:
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/pgtable.h
arch/parisc/include/asm/unistd.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/head.S
arch/parisc/kernel/module.c
arch/parisc/kernel/pacache.S
arch/parisc/kernel/sys_parisc32.c
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/vmlinux.lds.S
arch/parisc/mm/init.c
arch/sparc/Kconfig
arch/sparc/include/asm/cpudata_32.h
arch/sparc/include/asm/floppy_32.h
arch/sparc/include/asm/io.h
arch/sparc/include/asm/irq_32.h
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/pcic.h
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/setup.h
arch/sparc/include/asm/smp_32.h
arch/sparc/include/asm/smp_64.h
arch/sparc/include/asm/spinlock_32.h
arch/sparc/include/asm/system_32.h
arch/sparc/include/asm/system_64.h
arch/sparc/include/asm/winmacro.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/devices.c
arch/sparc/kernel/ds.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/head_32.S
arch/sparc/kernel/ioport.c
arch/sparc/kernel/irq.h
arch/sparc/kernel/irq_32.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_smp.c
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/of_device_64.c
arch/sparc/kernel/pci_msi.c
arch/sparc/kernel/pcic.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/prom_32.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_32.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/sun4c_irq.c
arch/sparc/kernel/sun4d_irq.c
arch/sparc/kernel/sun4d_smp.c
arch/sparc/kernel/sun4m_irq.c
arch/sparc/kernel/sun4m_smp.c
arch/sparc/kernel/sysfs.c
arch/sparc/kernel/time_32.c
arch/sparc/kernel/us2e_cpufreq.c
arch/sparc/kernel/us3_cpufreq.c
arch/sparc/lib/Makefile
arch/sparc/lib/rwsem_32.S [deleted file]
arch/sparc/mm/init_64.c
arch/x86/kernel/cpu/common.c
drivers/block/drbd/drbd_int.h
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma_v2.c
drivers/dma/ioat/dma_v3.c
drivers/firewire/core-card.c
drivers/firewire/core-cdev.c
drivers/firewire/core-device.c
drivers/firewire/core-iso.c
drivers/firewire/core-transaction.c
drivers/firewire/core.h
drivers/firewire/net.c
drivers/firewire/ohci.c
drivers/firewire/sbp2.c
drivers/ide/ide-acpi.c
drivers/ide/ide-floppy.c
drivers/ide/ide-scan-pci.c
drivers/ide/pmac.c
drivers/infiniband/hw/amso1100/c2.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/multipath.h
drivers/md/raid1.c
drivers/md/raid1.h
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb/firewire/firedtv-avc.c
drivers/media/dvb/firewire/firedtv-fw.c
drivers/net/benet/be_main.c
drivers/net/bna/bnad.c
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/chelsio/sge.c
drivers/net/cnic.c
drivers/net/cxgb3/sge.c
drivers/net/cxgb4/sge.c
drivers/net/cxgb4vf/sge.c
drivers/net/e1000/e1000_main.c
drivers/net/e1000e/netdev.c
drivers/net/ehea/ehea_qmr.h
drivers/net/enic/enic_main.c
drivers/net/forcedeth.c
drivers/net/igb/igb_main.c
drivers/net/igbvf/netdev.c
drivers/net/ixgb/ixgb_main.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbevf/ixgbevf_main.c
drivers/net/myri10ge/myri10ge.c
drivers/net/pasemi_mac.c
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/qla3xxx.c
drivers/net/qlge/qlge_main.c
drivers/net/r8169.c
drivers/net/s2io.c
drivers/net/sb1250-mac.c
drivers/net/sfc/rx.c
drivers/net/skge.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tc35815.c
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-traffic.c
drivers/video/Kconfig
drivers/video/via/Makefile
drivers/video/via/chip.h
drivers/video/via/dvi.c
drivers/video/via/dvi.h
drivers/video/via/global.c
drivers/video/via/global.h
drivers/video/via/hw.c
drivers/video/via/hw.h
drivers/video/via/lcd.c
drivers/video/via/lcd.h
drivers/video/via/share.h
drivers/video/via/via-core.c
drivers/video/via/via_clock.c [new file with mode: 0644]
drivers/video/via/via_clock.h [new file with mode: 0644]
drivers/video/via/viafbdev.c
drivers/video/via/viafbdev.h
drivers/video/via/viamode.c
drivers/video/via/viamode.h
fs/inode.c
fs/nilfs2/alloc.c
fs/nilfs2/bmap.c
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/cpfile.c
fs/nilfs2/dat.c
fs/nilfs2/file.c
fs/nilfs2/gcinode.c
fs/nilfs2/ifile.c
fs/nilfs2/inode.c
fs/nilfs2/ioctl.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/recovery.c
fs/nilfs2/segbuf.c
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/nilfs2/sufile.c
fs/nilfs2/sufile.h
fs/nilfs2/super.c
fs/nilfs2/the_nilfs.c
fs/nilfs2/the_nilfs.h
include/linux/firewire.h
include/linux/ide.h
include/linux/nilfs2_fs.h
include/linux/skbuff.h
include/net/mac80211.h
init/Kconfig
kernel/sched.c
net/ipv4/fib_trie.c
net/netlabel/netlabel_addrlist.h
sound/firewire/amdtp.c
sound/firewire/cmp.c
sound/firewire/iso-resources.c
sound/firewire/iso-resources.h
tools/testing/ktest/ktest.pl
tools/testing/ktest/sample.conf

index d18328b3f9386c0477e61340f44e995888a1c47e..da601dd34c057bb7499be84da84bc97981d67b40 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <asm/tlbflush.h>
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
@@ -112,8 +113,10 @@ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 static inline void
 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
-       if (PageAnon(page))
+       if (PageAnon(page)) {
+               flush_tlb_page(vma, vmaddr);
                flush_dcache_page_asm(page_to_phys(page), vmaddr);
+       }
 }
 
 #ifdef CONFIG_DEBUG_RODATA
index 5d7b8ce9fdf36507c8c541d76f4e176f9db01d94..22dadeb58695d99a81a718f864ce5b2b42b2474f 100644 (file)
@@ -177,7 +177,10 @@ struct vm_area_struct;
 
 #define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_KERNEL   (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_RO        (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNEL_EXEC      (_PAGE_KERNEL_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX       (_PAGE_KERNEL_EXEC | _PAGE_WRITE)
+#define _PAGE_KERNEL           (_PAGE_KERNEL_RO | _PAGE_WRITE)
 
 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
  * are page-aligned, we don't care about the PAGE_OFFSET bits, except
@@ -208,7 +211,9 @@ struct vm_area_struct;
 #define PAGE_COPY       PAGE_EXECREAD
 #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
 #define PAGE_KERNEL    __pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+#define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RWX        __pgprot(_PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
 #define PAGE_KERNEL_UNC        __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
 #define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
 
index 3eb82c2a5ec338973d70daf0628a4fbcb8bcacec..9cbc2c3bf630bea422fcafb546e419d7d88b8b44 100644 (file)
 #define __NR_recvmmsg          (__NR_Linux + 319)
 #define __NR_accept4           (__NR_Linux + 320)
 #define __NR_prlimit64         (__NR_Linux + 321)
-
-#define __NR_Linux_syscalls    (__NR_prlimit64 + 1)
+#define __NR_fanotify_init     (__NR_Linux + 322)
+#define __NR_fanotify_mark     (__NR_Linux + 323)
+#define __NR_clock_adjtime     (__NR_Linux + 324)
+#define __NR_name_to_handle_at (__NR_Linux + 325)
+#define __NR_open_by_handle_at (__NR_Linux + 326)
+#define __NR_syncfs            (__NR_Linux + 327)
+
+#define __NR_Linux_syscalls    (__NR_syncfs + 1)
 
 
 #define __IGNORE_select                /* newselect */
index 3f11331c27755f835a228cfdc91e5f05cf808042..83335f3da5fc81d8327c46ed1d90e72e2d46fc2f 100644 (file)
@@ -304,10 +304,20 @@ void flush_dcache_page(struct page *page)
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;
 
+               /* The TLB is the engine of coherence on parisc: The
+                * CPU is entitled to speculate any page with a TLB
+                * mapping, so here we kill the mapping then flush the
+                * page along a special flush only alias mapping.
+                * This guarantees that the page is no-longer in the
+                * cache for any process and nor may it be
+                * speculatively read in (until the user or kernel
+                * specifically accesses it, of course) */
+
+               flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
-                               printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+                               printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
                        old_addr = addr;
                }
        }
@@ -499,6 +509,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
        BUG_ON(!vma->vm_mm->context);
 
+       flush_tlb_page(vma, vmaddr);
        __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
 
 }
index ead8d2a1034c534dfecfdea7b1748e4e73ceafe9..6f05944391439bcf88ac7761616bd94b24669fcd 100644 (file)
@@ -692,6 +692,9 @@ ENTRY(fault_vector_11)
 END(fault_vector_11)
 
 #endif
+       /* Fault vector is separately protected and *must* be on its own page */
+       .align          PAGE_SIZE
+ENTRY(end_fault_vector)
 
        .import         handle_interruption,code
        .import         do_cpu_irq_mask,code
index 145c5e4caaa05974467e22eb1a6b87c2c0691953..37aabd772fbb91c3d4f07210a2543638cf4ef159 100644 (file)
@@ -106,8 +106,9 @@ $bss_loop:
 #endif
 
 
-       /* Now initialize the PTEs themselves */
-       ldo             0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+       /* Now initialize the PTEs themselves.  We use RWX for
+        * everything ... it will get remapped correctly later */
+       ldo             0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
        ldi             (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
        load32          PA(pg0),%r1
 
index 6e81bb596e5b476e598e4a7309e4aba80ba0a322..cedbbb8b18d9508ef1539068355911cfda0be66a 100644 (file)
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 
+#include <asm/pgtable.h>
 #include <asm/unwind.h>
 
 #if 0
@@ -214,7 +216,13 @@ void *module_alloc(unsigned long size)
 {
        if (size == 0)
                return NULL;
-       return vmalloc(size);
+       /* using RWX means less protection for modules, but it's
+        * easier than trying to map the text, data, init_text and
+        * init_data correctly */
+       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+                                   GFP_KERNEL | __GFP_HIGHMEM,
+                                   PAGE_KERNEL_RWX, -1,
+                                   __builtin_return_address(0));
 }
 
 #ifndef CONFIG_64BIT
index a85823668cba041708285555e4cc31f2475f131f..93ff3d90edd1edc8a0d5d41ecc4adae27efe45f3 100644 (file)
@@ -817,10 +817,7 @@ ENTRY(purge_kernel_dcache_page)
        .procend
 ENDPROC(purge_kernel_dcache_page)
 
-
-       .export flush_user_dcache_range_asm
-
-flush_user_dcache_range_asm:
+ENTRY(flush_user_dcache_range_asm)
        .proc
        .callinfo NO_CALLS
        .entry
@@ -839,6 +836,7 @@ flush_user_dcache_range_asm:
        .exit
 
        .procend
+ENDPROC(flush_user_dcache_range_asm)
 
 ENTRY(flush_kernel_dcache_range_asm)
        .proc
index 88a0ad14a9c99a466c9aaaf6ba6459868a61d1fa..dc9a624623233e0e9f084e3659e65e37bf28df47 100644 (file)
@@ -228,3 +228,11 @@ asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
         return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
                              ((loff_t)lenhi << 32) | lenlo);
 }
+
+asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
+                                        u32 mask_lo, int fd,
+                                        const char __user *pathname)
+{
+       return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
+                                fd, pathname);
+}
index 4be85ee10b85dbdb0115f5d5bda2b2f789483abf..a5b02ce4d41eea6797b1c4ce5c24476f6764e0bd 100644 (file)
        ENTRY_COMP(recvmmsg)
        ENTRY_SAME(accept4)             /* 320 */
        ENTRY_SAME(prlimit64)
+       ENTRY_SAME(fanotify_init)
+       ENTRY_COMP(fanotify_mark)
+       ENTRY_COMP(clock_adjtime)
+       ENTRY_SAME(name_to_handle_at)   /* 325 */
+       ENTRY_COMP(open_by_handle_at)
+       ENTRY_SAME(syncfs)
 
        /* Nothing yet */
 
index 2d9a5c7c76f5142b2d6670a3a8a0b985644be723..e1a55849bfa7832d8b58fac498012cd8cc44a438 100644 (file)
@@ -137,6 +137,7 @@ SECTIONS
        . = ALIGN(16384);
        __init_begin = .;
        INIT_TEXT_SECTION(16384)
+       . = ALIGN(PAGE_SIZE);
        INIT_DATA_SECTION(16)
        /* we have to discard exit text and such at runtime, not link time */
        .exit.text :
index b1d126258dee10612054ac3bb1a472c562b37cbc..5fa1e273006e2a673ed62aecdb62127a7a5e1a6b 100644 (file)
@@ -371,24 +371,158 @@ static void __init setup_bootmem(void)
        request_resource(&sysram_resources[0], &pdcdata_resource);
 }
 
+static void __init map_pages(unsigned long start_vaddr,
+                            unsigned long start_paddr, unsigned long size,
+                            pgprot_t pgprot, int force)
+{
+       pgd_t *pg_dir;
+       pmd_t *pmd;
+       pte_t *pg_table;
+       unsigned long end_paddr;
+       unsigned long start_pmd;
+       unsigned long start_pte;
+       unsigned long tmp1;
+       unsigned long tmp2;
+       unsigned long address;
+       unsigned long vaddr;
+       unsigned long ro_start;
+       unsigned long ro_end;
+       unsigned long fv_addr;
+       unsigned long gw_addr;
+       extern const unsigned long fault_vector_20;
+       extern void * const linux_gateway_page;
+
+       ro_start = __pa((unsigned long)_text);
+       ro_end   = __pa((unsigned long)&data_start);
+       fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
+       gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+
+       end_paddr = start_paddr + size;
+
+       pg_dir = pgd_offset_k(start_vaddr);
+
+#if PTRS_PER_PMD == 1
+       start_pmd = 0;
+#else
+       start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+       start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+       address = start_paddr;
+       vaddr = start_vaddr;
+       while (address < end_paddr) {
+#if PTRS_PER_PMD == 1
+               pmd = (pmd_t *)__pa(pg_dir);
+#else
+               pmd = (pmd_t *)pgd_address(*pg_dir);
+
+               /*
+                * pmd is physical at this point
+                */
+
+               if (!pmd) {
+                       pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+                       pmd = (pmd_t *) __pa(pmd);
+               }
+
+               pgd_populate(NULL, pg_dir, __va(pmd));
+#endif
+               pg_dir++;
+
+               /* now change pmd to kernel virtual addresses */
+
+               pmd = (pmd_t *)__va(pmd) + start_pmd;
+               for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
+
+                       /*
+                        * pg_table is physical at this point
+                        */
+
+                       pg_table = (pte_t *)pmd_address(*pmd);
+                       if (!pg_table) {
+                               pg_table = (pte_t *)
+                                       alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+                               pg_table = (pte_t *) __pa(pg_table);
+                       }
+
+                       pmd_populate_kernel(NULL, pmd, __va(pg_table));
+
+                       /* now change pg_table to kernel virtual addresses */
+
+                       pg_table = (pte_t *) __va(pg_table) + start_pte;
+                       for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
+                               pte_t pte;
+
+                               /*
+                                * Map the fault vector writable so we can
+                                * write the HPMC checksum.
+                                */
+                               if (force)
+                                       pte =  __mk_pte(address, pgprot);
+                               else if (core_kernel_text(vaddr) &&
+                                        address != fv_addr)
+                                       pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+                               else
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+                               if (address >= ro_start && address < ro_end
+                                                       && address != fv_addr
+                                                       && address != gw_addr)
+                                       pte = __mk_pte(address, PAGE_KERNEL_RO);
+                               else
+#endif
+                                       pte = __mk_pte(address, pgprot);
+
+                               if (address >= end_paddr) {
+                                       if (force)
+                                               break;
+                                       else
+                                               pte_val(pte) = 0;
+                               }
+
+                               set_pte(pg_table, pte);
+
+                               address += PAGE_SIZE;
+                               vaddr += PAGE_SIZE;
+                       }
+                       start_pte = 0;
+
+                       if (address >= end_paddr)
+                           break;
+               }
+               start_pmd = 0;
+       }
+}
+
 void free_initmem(void)
 {
        unsigned long addr;
        unsigned long init_begin = (unsigned long)__init_begin;
        unsigned long init_end = (unsigned long)__init_end;
 
-#ifdef CONFIG_DEBUG_KERNEL
+       /* The init text pages are marked R-X.  We have to
+        * flush the icache and mark them RW-
+        *
+        * This is tricky, because map_pages is in the init section.
+        * Do a dummy remap of the data section first (the data
+        * section is already PAGE_KERNEL) to pull in the TLB entries
+        * for map_kernel */
+       map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+                 PAGE_KERNEL_RWX, 1);
+       /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
+        * map_pages */
+       map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+                 PAGE_KERNEL, 1);
+
+       /* force the kernel to see the new TLB entries */
+       __flush_tlb_range(0, init_begin, init_end);
        /* Attempt to catch anyone trying to execute code here
         * by filling the page with BRK insns.
         */
        memset((void *)init_begin, 0x00, init_end - init_begin);
+       /* finally dump all the instructions which were cached, since the
+        * pages are no-longer executable */
        flush_icache_range(init_begin, init_end);
-#endif
        
-       /* align __init_begin and __init_end to page size,
-          ignoring linker script where we might have tried to save RAM */
-       init_begin = PAGE_ALIGN(init_begin);
-       init_end = PAGE_ALIGN(init_end);
        for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
@@ -618,114 +752,6 @@ void show_mem(unsigned int filter)
 #endif
 }
 
-
-static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
-{
-       pgd_t *pg_dir;
-       pmd_t *pmd;
-       pte_t *pg_table;
-       unsigned long end_paddr;
-       unsigned long start_pmd;
-       unsigned long start_pte;
-       unsigned long tmp1;
-       unsigned long tmp2;
-       unsigned long address;
-       unsigned long ro_start;
-       unsigned long ro_end;
-       unsigned long fv_addr;
-       unsigned long gw_addr;
-       extern const unsigned long fault_vector_20;
-       extern void * const linux_gateway_page;
-
-       ro_start = __pa((unsigned long)_text);
-       ro_end   = __pa((unsigned long)&data_start);
-       fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-       gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
-
-       end_paddr = start_paddr + size;
-
-       pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-       start_pmd = 0;
-#else
-       start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
-       start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-
-       address = start_paddr;
-       while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-               pmd = (pmd_t *)__pa(pg_dir);
-#else
-               pmd = (pmd_t *)pgd_address(*pg_dir);
-
-               /*
-                * pmd is physical at this point
-                */
-
-               if (!pmd) {
-                       pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
-                       pmd = (pmd_t *) __pa(pmd);
-               }
-
-               pgd_populate(NULL, pg_dir, __va(pmd));
-#endif
-               pg_dir++;
-
-               /* now change pmd to kernel virtual addresses */
-
-               pmd = (pmd_t *)__va(pmd) + start_pmd;
-               for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
-
-                       /*
-                        * pg_table is physical at this point
-                        */
-
-                       pg_table = (pte_t *)pmd_address(*pmd);
-                       if (!pg_table) {
-                               pg_table = (pte_t *)
-                                       alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
-                               pg_table = (pte_t *) __pa(pg_table);
-                       }
-
-                       pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-                       /* now change pg_table to kernel virtual addresses */
-
-                       pg_table = (pte_t *) __va(pg_table) + start_pte;
-                       for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
-                               pte_t pte;
-
-                               /*
-                                * Map the fault vector writable so we can
-                                * write the HPMC checksum.
-                                */
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-                               if (address >= ro_start && address < ro_end
-                                                       && address != fv_addr
-                                                       && address != gw_addr)
-                                   pte = __mk_pte(address, PAGE_KERNEL_RO);
-                               else
-#endif
-                                   pte = __mk_pte(address, pgprot);
-
-                               if (address >= end_paddr)
-                                       pte_val(pte) = 0;
-
-                               set_pte(pg_table, pte);
-
-                               address += PAGE_SIZE;
-                       }
-                       start_pte = 0;
-
-                       if (address >= end_paddr)
-                           break;
-               }
-               start_pmd = 0;
-       }
-}
-
 /*
  * pagetable_init() sets up the page tables
  *
@@ -750,14 +776,14 @@ static void __init pagetable_init(void)
                size = pmem_ranges[range].pages << PAGE_SHIFT;
 
                map_pages((unsigned long)__va(start_paddr), start_paddr,
-                       size, PAGE_KERNEL);
+                         size, PAGE_KERNEL, 0);
        }
 
 #ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_end && initrd_end > mem_limit) {
                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                map_pages(initrd_start, __pa(initrd_start),
-                       initrd_end - initrd_start, PAGE_KERNEL);
+                         initrd_end - initrd_start, PAGE_KERNEL, 0);
        }
 #endif
 
@@ -782,7 +808,7 @@ static void __init gateway_init(void)
         */
 
        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
-               PAGE_SIZE, PAGE_GATEWAY);
+                 PAGE_SIZE, PAGE_GATEWAY, 1);
 }
 
 #ifdef CONFIG_HPUX
index e560d102215a18833ef2ecc210d67ce3ec7cc83e..63a027c9ada5aabf99fdea93c2f355e431076cce 100644 (file)
@@ -25,6 +25,10 @@ config SPARC
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
        select HAVE_ARCH_JUMP_LABEL
+       select HAVE_GENERIC_HARDIRQS
+       select GENERIC_HARDIRQS_NO_DEPRECATED
+       select GENERIC_IRQ_SHOW
+       select USE_GENERIC_SMP_HELPERS if SMP
 
 config SPARC32
        def_bool !64BIT
@@ -43,15 +47,12 @@ config SPARC64
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_SYSCALL_TRACEPOINTS
-       select USE_GENERIC_SMP_HELPERS if SMP
        select RTC_DRV_CMOS
        select RTC_DRV_BQ4802
        select RTC_DRV_SUN4V
        select RTC_DRV_STARFIRE
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
-       select HAVE_GENERIC_HARDIRQS
-       select GENERIC_IRQ_SHOW
        select IRQ_PREFLOW_FASTEOI
 
 config ARCH_DEFCONFIG
index 31d48a0e32c7cd3cd3b4eda72325229496f00411..a4c5a938b93663669923f11b786d1f8127992f12 100644 (file)
@@ -16,6 +16,10 @@ typedef struct {
        unsigned long clock_tick;
        unsigned int multiplier;
        unsigned int counter;
+#ifdef CONFIG_SMP
+       unsigned int irq_resched_count;
+       unsigned int irq_call_count;
+#endif
        int prom_node;
        int mid;
        int next;
@@ -23,5 +27,6 @@ typedef struct {
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
+#define local_cpu_data() __get_cpu_var(__cpu_data)
 
 #endif /* _SPARC_CPUDATA_H */
index 86666f70322e753430d661b6c854a5150208ff9e..482c79e2a41685343025cb9514eef7e205b2f637 100644 (file)
@@ -281,28 +281,27 @@ static inline void sun_fd_enable_dma(void)
        pdma_areasize = pdma_size;
 }
 
-/* Our low-level entry point in arch/sparc/kernel/entry.S */
-extern int sparc_floppy_request_irq(int irq, unsigned long flags,
-                                   irq_handler_t irq_handler);
+extern int sparc_floppy_request_irq(unsigned int irq,
+                                    irq_handler_t irq_handler);
 
 static int sun_fd_request_irq(void)
 {
        static int once = 0;
-       int error;
 
-       if(!once) {
+       if (!once) {
                once = 1;
-               error = sparc_floppy_request_irq(FLOPPY_IRQ,
-                                                IRQF_DISABLED,
-                                                floppy_interrupt);
-               return ((error == 0) ? 0 : -1);
-       } else return 0;
+               return sparc_floppy_request_irq(FLOPPY_IRQ, floppy_interrupt);
+       } else {
+               return 0;
+       }
 }
 
 static struct linux_prom_registers fd_regs[2];
 
 static int sun_floppy_init(void)
 {
+       struct platform_device *op;
+       struct device_node *dp;
        char state[128];
        phandle tnode, fd_node;
        int num_regs;
@@ -310,7 +309,6 @@ static int sun_floppy_init(void)
 
        use_virtual_dma = 1;
 
-       FLOPPY_IRQ = 11;
        /* Forget it if we aren't on a machine that could possibly
         * ever have a floppy drive.
         */
@@ -349,6 +347,26 @@ static int sun_floppy_init(void)
        sun_fdc = (struct sun_flpy_controller *)
            of_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
 
+       /* Look up irq in platform_device.
+        * We try "SUNW,fdtwo" and "fd"
+        */
+       for_each_node_by_name(dp, "SUNW,fdtwo") {
+               op = of_find_device_by_node(dp);
+               if (op)
+                       break;
+       }
+       if (!op) {
+               for_each_node_by_name(dp, "fd") {
+                       op = of_find_device_by_node(dp);
+                       if (op)
+                               break;
+               }
+       }
+       if (!op)
+               goto no_sun_fdc;
+
+       FLOPPY_IRQ = op->archdata.irqs[0];
+
        /* Last minute sanity check... */
        if(sun_fdc->status_82072 == 0xff) {
                sun_fdc = NULL;
index a34b2994937ab2e4323de0308ab8a630729dcf70..f6902cf3cbe997f5c66ec750e1ebafcfdc2e9f5b 100644 (file)
@@ -5,4 +5,17 @@
 #else
 #include <asm/io_32.h>
 #endif
+
+/*
+ * Defines used for both SPARC32 and SPARC64
+ */
+
+/* Big endian versions of memory read/write routines */
+#define readb_be(__addr)       __raw_readb(__addr)
+#define readw_be(__addr)       __raw_readw(__addr)
+#define readl_be(__addr)       __raw_readl(__addr)
+#define writeb_be(__b, __addr) __raw_writeb(__b, __addr)
+#define writel_be(__w, __addr) __raw_writel(__w, __addr)
+#define writew_be(__l, __addr) __raw_writew(__l, __addr)
+
 #endif
index eced3e3ebd30f5603b6a236d0bd7886e67d1ddf2..2ae3acaeb1b31980d0fa0247de7de7912f4e5389 100644 (file)
@@ -6,7 +6,11 @@
 #ifndef _SPARC_IRQ_H
 #define _SPARC_IRQ_H
 
-#define NR_IRQS    16
+/* Allocated number of logical irq numbers.
+ * sun4d boxes (ss2000e) should be OK with ~32.
+ * Be on the safe side and make room for 64
+ */
+#define NR_IRQS    64
 
 #include <linux/interrupt.h>
 
index c04f96fb753c447a2c8e0a5261ff3816497a1886..6bdaf1e43d2aef8eba06e01da9f963706d54c2d2 100644 (file)
 #define LEON_DIAGF_VALID       0x2000
 #define LEON_DIAGF_VALID_SHIFT 13
 
-/*
- *  Interrupt Sources
- *
- *  The interrupt source numbers directly map to the trap type and to
- *  the bits used in the Interrupt Clear, Interrupt Force, Interrupt Mask,
- *  and the Interrupt Pending Registers.
- */
-#define LEON_INTERRUPT_CORRECTABLE_MEMORY_ERROR        1
-#define LEON_INTERRUPT_UART_1_RX_TX            2
-#define LEON_INTERRUPT_UART_0_RX_TX            3
-#define LEON_INTERRUPT_EXTERNAL_0              4
-#define LEON_INTERRUPT_EXTERNAL_1              5
-#define LEON_INTERRUPT_EXTERNAL_2              6
-#define LEON_INTERRUPT_EXTERNAL_3              7
-#define LEON_INTERRUPT_TIMER1                  8
-#define LEON_INTERRUPT_TIMER2                  9
-#define LEON_INTERRUPT_EMPTY1                  10
-#define LEON_INTERRUPT_EMPTY2                  11
-#define LEON_INTERRUPT_OPEN_ETH                        12
-#define LEON_INTERRUPT_EMPTY4                  13
-#define LEON_INTERRUPT_EMPTY5                  14
-#define LEON_INTERRUPT_EMPTY6                  15
-
 /* irq masks */
 #define LEON_HARD_INT(x)       (1 << (x))      /* irq 0-15 */
 #define LEON_IRQMASK_R         0x0000fffe      /* bit 15- 1 of lregs.irqmask */
@@ -183,7 +160,6 @@ static inline void leon_srmmu_enabletlb(void)
 /* macro access for leon_readnobuffer_reg() */
 #define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
 
-extern void sparc_leon_eirq_register(int eirq);
 extern void leon_init(void);
 extern void leon_switch_mm(void);
 extern void leon_init_IRQ(void);
@@ -239,8 +215,8 @@ static inline int sparc_leon3_cpuid(void)
 #endif /*!__ASSEMBLY__*/
 
 #ifdef CONFIG_SMP
-# define LEON3_IRQ_RESCHEDULE          13
-# define LEON3_IRQ_TICKER              (leon_percpu_timer_dev[0].irq)
+# define LEON3_IRQ_IPI_DEFAULT         13
+# define LEON3_IRQ_TICKER              (leon3_ticker_irq)
 # define LEON3_IRQ_CROSS_CALL          15
 #endif
 
@@ -339,9 +315,9 @@ struct leon2_cacheregs {
 #include <linux/interrupt.h>
 
 struct device_node;
-extern int sparc_leon_eirq_get(int eirq, int cpu);
-extern irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id);
-extern void sparc_leon_eirq_register(int eirq);
+extern unsigned int leon_build_device_irq(unsigned int real_irq,
+                                          irq_flow_handler_t flow_handler,
+                                          const char *name, int do_ack);
 extern void leon_clear_clock_irq(void);
 extern void leon_load_profile_irq(int cpu, unsigned int limit);
 extern void leon_init_timers(irq_handler_t counter_fn);
@@ -358,6 +334,7 @@ extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
 extern int leon_flush_needed(void);
 extern void leon_switch_mm(void);
 extern int srmmu_swprobe_trace;
+extern int leon3_ticker_irq;
 
 #ifdef CONFIG_SMP
 extern int leon_smp_nrcpus(void);
@@ -366,17 +343,19 @@ extern void leon_smp_done(void);
 extern void leon_boot_cpus(void);
 extern int leon_boot_one_cpu(int i);
 void leon_init_smp(void);
-extern void cpu_probe(void);
 extern void cpu_idle(void);
 extern void init_IRQ(void);
 extern void cpu_panic(void);
 extern int __leon_processor_id(void);
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
+extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
 
-extern unsigned int real_irq_entry[], smpleon_ticker[];
+extern unsigned int real_irq_entry[];
+extern unsigned int smpleon_ipi[];
 extern unsigned int patchme_maybe_smp_msg[];
 extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
 extern unsigned int linux_trap_ipi15_sun4m[];
+extern int leon_ipi_irq;
 
 #endif /* CONFIG_SMP */
 
index f20ef562b265290f71d66c755368af1a97cdd786..7eb5d78f5211d04782430eeb8c71b8083522f22f 100644 (file)
@@ -29,11 +29,17 @@ struct linux_pcic {
        int                     pcic_imdim;
 };
 
-extern int pcic_probe(void);
-/* Erm... MJ redefined pcibios_present() so that it does not work early. */
+#ifdef CONFIG_PCI
 extern int pcic_present(void);
+extern int pcic_probe(void);
+extern void pci_time_init(void);
 extern void sun4m_pci_init_IRQ(void);
-
+#else
+static inline int pcic_present(void) { return 0; }
+static inline int pcic_probe(void) { return 0; }
+static inline void pci_time_init(void) {}
+static inline void sun4m_pci_init_IRQ(void) {}
+#endif
 #endif
 
 /* Size of PCI I/O space which we relocate. */
index 303bd4dc82927b3bf85371c01200054f94e101bb..5b31a8e89823699fbe5f99ebf6eb8d34bac8cb46 100644 (file)
@@ -8,6 +8,8 @@
  *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
+#include <linux/const.h>
+
 #ifndef __ASSEMBLY__
 #include <asm-generic/4level-fixup.h>
 
@@ -456,9 +458,9 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
 
 #endif /* !(__ASSEMBLY__) */
 
-#define VMALLOC_START           0xfe600000
+#define VMALLOC_START           _AC(0xfe600000,UL)
 /* XXX Alter this when I get around to fixing sun4c - Anton */
-#define VMALLOC_END             0xffc00000
+#define VMALLOC_END             _AC(0xffc00000,UL)
 
 
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
index f8dddb7045bbb2d16744e09b4200969178c2eb89..b77128c80524fb7166a83d7411bd0c1a66acc5fb 100644 (file)
@@ -699,6 +699,9 @@ extern pmd_t swapper_low_pmd_dir[2048];
 extern void paging_init(void);
 extern unsigned long find_ecache_flush_span(unsigned long size);
 
+struct seq_file;
+extern void mmu_info(struct seq_file *);
+
 /* These do nothing with the way I have things setup. */
 #define mmu_lockarea(vaddr, len)               (vaddr)
 #define mmu_unlockarea(vaddr, len)             do { } while(0)
index 2643c62f4ac0d6809797e2b851e98fe8ce5e8042..64718ba26434bb490e2c27a719d5a20f0ea76946 100644 (file)
 # define COMMAND_LINE_SIZE 256
 #endif
 
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SPARC32
+/* The CPU that was used for booting
+ * Only sun4d + leon may have boot_cpu_id != 0
+ */
+extern unsigned char boot_cpu_id;
+extern unsigned char boot_cpu_id4;
+#endif
+
+#endif /* __KERNEL__ */
+
 #endif /* _SPARC_SETUP_H */
index d82d7f4c0a798f58eef6a4af71bfaa0e2ed357fa..093f10843ff2981c813e8f9669f8f211c3f9ffd1 100644 (file)
@@ -50,42 +50,38 @@ void smp_callin(void);
 void smp_boot_cpus(void);
 void smp_store_cpu_info(int);
 
+void smp_resched_interrupt(void);
+void smp_call_function_single_interrupt(void);
+void smp_call_function_interrupt(void);
+
 struct seq_file;
 void smp_bogo(struct seq_file *);
 void smp_info(struct seq_file *);
 
 BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
 BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
+BTFIXUPDEF_CALL(void, smp_ipi_resched, int);
+BTFIXUPDEF_CALL(void, smp_ipi_single, int);
+BTFIXUPDEF_CALL(void, smp_ipi_mask_one, int);
 BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
 BTFIXUPDEF_BLACKBOX(load_current)
 
 #define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
 
-static inline void xc0(smpfunc_t func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); }
+static inline void xc0(smpfunc_t func) { smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0); }
 static inline void xc1(smpfunc_t func, unsigned long arg1)
-{ smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); }
+{ smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); }
 static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
-{ smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); }
+{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); }
 static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                           unsigned long arg3)
-{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); }
+{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); }
 static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                           unsigned long arg3, unsigned long arg4)
-{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
-
-static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
-{
-       xc1((smpfunc_t)func, (unsigned long)info);
-       return 0;
-}
+{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4); }
 
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-                                          void *info, int wait)
-{
-       smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid),
-                      (unsigned long) info, 0, 0, 0);
-       return 0;
-}
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 static inline int cpu_logical_map(int cpu)
 {
@@ -135,6 +131,11 @@ static inline int hard_smp_processor_id(void)
                __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
                                     "nop; nop" :
                                     "=&r" (cpuid));
+                    - leon
+               __asm__ __volatile__(   "rd %asr17, %0\n\t"
+                                       "srl %0, 0x1c, %0\n\t"
+                                       "nop\n\t" :
+                                       "=&r" (cpuid));
           See btfixup.h and btfixupprep.c to understand how a blackbox works.
         */
        __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
index f49e11cd4ded282c7084a86672c6ee8929fa4989..20bca89507102d84e6eecc0cb0272eaa9433b46a 100644 (file)
@@ -49,6 +49,10 @@ extern void cpu_play_dead(void);
 
 extern void smp_fetch_global_regs(void);
 
+struct seq_file;
+void smp_bogo(struct seq_file *);
+void smp_info(struct seq_file *);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
index 7f9b9dba38a64896705ecb85b9d897e71125096d..5f5b8bf3f50d39a5c4b8cae94231cdd87543bbfa 100644 (file)
@@ -9,6 +9,7 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/psr.h>
+#include <asm/processor.h> /* for cpu_relax */
 
 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
index 890036b3689aa2e8ee58b083926469c1f555daa2..47a7e862474efa94247d0192945ae23af8fcb78e 100644 (file)
 
 #include <linux/irqflags.h>
 
-static inline unsigned int probe_irq_mask(unsigned long val)
-{
-       return 0;
-}
-
 /*
  * Sparc (general) CPU types
  */
index e3b65d8cf41b715bb71b9339c9184e42feea01d0..3c96d3bb9f151d0112c922dedea653bc3acafe4d 100644 (file)
@@ -29,10 +29,6 @@ enum sparc_cpu {
 /* This cannot ever be a sun4c :) That's just history. */
 #define ARCH_SUN4C 0
 
-extern const char *sparc_cpu_type;
-extern const char *sparc_fpu_type;
-extern const char *sparc_pmu_type;
-
 extern char reboot_command[];
 
 /* These are here in an effort to more fully work around Spitfire Errata
index 5b0a06dc3bcbcd7e17a5813faef90ce063a45eb4..a9be04b0d049c28c7aae4893ea548e7071721326 100644 (file)
         st       %scratch, [%cur_reg + TI_W_SAVED];
 
 #ifdef CONFIG_SMP
+/* Results of LOAD_CURRENT() after BTFIXUP for SUN4M, SUN4D & LEON (comments) */
 #define LOAD_CURRENT4M(dest_reg, idreg) \
         rd       %tbr, %idreg; \
        sethi    %hi(current_set), %dest_reg; \
        or      %dest_reg, %lo(C_LABEL(current_set)), %dest_reg; \
        ld      [%idreg + %dest_reg], %dest_reg;
 
+#define LOAD_CURRENT_LEON(dest_reg, idreg)                     \
+       rd      %asr17, %idreg;                                 \
+       sethi   %hi(current_set), %dest_reg;                    \
+       srl     %idreg, 0x1c, %idreg;                           \
+       or      %dest_reg, %lo(current_set), %dest_reg;         \
+       sll     %idreg, 0x2, %idreg;                            \
+       ld      [%idreg + %dest_reg], %dest_reg;
+
 /* Blackbox - take care with this... - check smp4m and smp4d before changing this. */
 #define LOAD_CURRENT(dest_reg, idreg)                                  \
        sethi    %hi(___b_load_current), %idreg;                        \
index 99aa4db6e9c2963f154052611055a535f64762a1..9cff2709a96df89576b460d1138199c79c4eb5c2 100644 (file)
@@ -71,10 +71,6 @@ obj-$(CONFIG_SPARC64)        += pcr.o
 obj-$(CONFIG_SPARC64)  += nmi.o
 obj-$(CONFIG_SPARC64_SMP) += cpumap.o
 
-# sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
-obj-$(CONFIG_SPARC32)     += devres.o
-devres-y                  := ../../../kernel/irq/devres.o
-
 obj-y                     += dma.o
 
 obj-$(CONFIG_SPARC32_PCI) += pcic.o
index 7925c54f41330f3003486914e3393f078ccb57d0..138dbbc8dc8475d477119420fec89d582577006f 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  */
 
+#include <linux/seq_file.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -11,7 +12,9 @@
 #include <linux/threads.h>
 
 #include <asm/spitfire.h>
+#include <asm/pgtable.h>
 #include <asm/oplib.h>
+#include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/head.h>
 #include <asm/psr.h>
@@ -23,6 +26,9 @@
 DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
 EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
+int ncpus_probed;
+unsigned int fsr_storage;
+
 struct cpu_info {
        int psr_vers;
        const char *name;
@@ -247,13 +253,12 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
  * machine type value into consideration too.  I will fix this.
  */
 
-const char *sparc_cpu_type;
-const char *sparc_fpu_type;
+static const char *sparc_cpu_type;
+static const char *sparc_fpu_type;
 const char *sparc_pmu_type;
 
-unsigned int fsr_storage;
 
-static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
+static void __init set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 {
        const struct manufacturer_info *manuf;
        int i;
@@ -313,7 +318,123 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
 }
 
 #ifdef CONFIG_SPARC32
-void __cpuinit cpu_probe(void)
+static int show_cpuinfo(struct seq_file *m, void *__unused)
+{
+       seq_printf(m,
+                  "cpu\t\t: %s\n"
+                  "fpu\t\t: %s\n"
+                  "promlib\t\t: Version %d Revision %d\n"
+                  "prom\t\t: %d.%d\n"
+                  "type\t\t: %s\n"
+                  "ncpus probed\t: %d\n"
+                  "ncpus active\t: %d\n"
+#ifndef CONFIG_SMP
+                  "CPU0Bogo\t: %lu.%02lu\n"
+                  "CPU0ClkTck\t: %ld\n"
+#endif
+                  ,
+                  sparc_cpu_type,
+                  sparc_fpu_type ,
+                  romvec->pv_romvers,
+                  prom_rev,
+                  romvec->pv_printrev >> 16,
+                  romvec->pv_printrev & 0xffff,
+                  &cputypval[0],
+                  ncpus_probed,
+                  num_online_cpus()
+#ifndef CONFIG_SMP
+                  , cpu_data(0).udelay_val/(500000/HZ),
+                  (cpu_data(0).udelay_val/(5000/HZ)) % 100,
+                  cpu_data(0).clock_tick
+#endif
+               );
+
+#ifdef CONFIG_SMP
+       smp_bogo(m);
+#endif
+       mmu_info(m);
+#ifdef CONFIG_SMP
+       smp_info(m);
+#endif
+       return 0;
+}
+#endif /* CONFIG_SPARC32 */
+
+#ifdef CONFIG_SPARC64
+unsigned int dcache_parity_tl1_occurred;
+unsigned int icache_parity_tl1_occurred;
+
+
+static int show_cpuinfo(struct seq_file *m, void *__unused)
+{
+       seq_printf(m,
+                  "cpu\t\t: %s\n"
+                  "fpu\t\t: %s\n"
+                  "pmu\t\t: %s\n"
+                  "prom\t\t: %s\n"
+                  "type\t\t: %s\n"
+                  "ncpus probed\t: %d\n"
+                  "ncpus active\t: %d\n"
+                  "D$ parity tl1\t: %u\n"
+                  "I$ parity tl1\t: %u\n"
+#ifndef CONFIG_SMP
+                  "Cpu0ClkTck\t: %016lx\n"
+#endif
+                  ,
+                  sparc_cpu_type,
+                  sparc_fpu_type,
+                  sparc_pmu_type,
+                  prom_version,
+                  ((tlb_type == hypervisor) ?
+                   "sun4v" :
+                   "sun4u"),
+                  ncpus_probed,
+                  num_online_cpus(),
+                  dcache_parity_tl1_occurred,
+                  icache_parity_tl1_occurred
+#ifndef CONFIG_SMP
+                  , cpu_data(0).clock_tick
+#endif
+               );
+#ifdef CONFIG_SMP
+       smp_bogo(m);
+#endif
+       mmu_info(m);
+#ifdef CONFIG_SMP
+       smp_info(m);
+#endif
+       return 0;
+}
+#endif /* CONFIG_SPARC64 */
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       /* The pointer we are returning is arbitrary,
+        * it just has to be non-NULL and not IS_ERR
+        * in the success case.
+        */
+       return *pos == 0 ? &c_start : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       ++*pos;
+       return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+       .start =c_start,
+       .next = c_next,
+       .stop = c_stop,
+       .show = show_cpuinfo,
+};
+
+#ifdef CONFIG_SPARC32
+static int __init cpu_type_probe(void)
 {
        int psr_impl, psr_vers, fpu_vers;
        int psr;
@@ -332,8 +453,12 @@ void __cpuinit cpu_probe(void)
        put_psr(psr);
 
        set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
+
+       return 0;
 }
-#else
+#endif /* CONFIG_SPARC32 */
+
+#ifdef CONFIG_SPARC64
 static void __init sun4v_cpu_probe(void)
 {
        switch (sun4v_chip_type) {
@@ -374,6 +499,6 @@ static int __init cpu_type_probe(void)
        }
        return 0;
 }
+#endif /* CONFIG_SPARC64 */
 
 early_initcall(cpu_type_probe);
-#endif
index 8de64c8126bcc3f7d19abc7d4c9f49e2eb485471..d91fd782743a1ed1cd5c001407dec13f428922a5 100644 (file)
@@ -202,7 +202,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
        new_tree->total_nodes = n;
        memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
 
-       prev_cpu = cpu = first_cpu(cpu_online_map);
+       prev_cpu = cpu = cpumask_first(cpu_online_mask);
 
        /* Initialize all levels in the tree with the first CPU */
        for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
@@ -381,7 +381,7 @@ static int simple_map_to_cpu(unsigned int index)
        }
 
        /* Impossible, since num_online_cpus() <= num_possible_cpus() */
-       return first_cpu(cpu_online_map);
+       return cpumask_first(cpu_online_mask);
 }
 
 static int _map_to_cpu(unsigned int index)
index d2eddd6647cd2d1debfb79d11a9bce1cc87e8c74..113c052c30432ed7cd1316b5e46d9e9ffa5bcfa1 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/system.h>
 #include <asm/cpudata.h>
 
-extern void cpu_probe(void);
 extern void clock_stop_probe(void); /* tadpole.c */
 extern void sun4c_probe_memerr_reg(void);
 
@@ -115,7 +114,7 @@ int cpu_get_hwmid(phandle prom_node)
 
 void __init device_scan(void)
 {
-       prom_printf("Booting Linux...\n");
+       printk(KERN_NOTICE "Booting Linux...\n");
 
 #ifndef CONFIG_SMP
        {
@@ -133,7 +132,6 @@ void __init device_scan(void)
        }
 #endif /* !CONFIG_SMP */
 
-       cpu_probe();
        {
                extern void auxio_probe(void);
                extern void auxio_power_probe(void);
index 3add4de8a1a9804c89f1b4ab5ba3f73a1c167c74..dd1342c0a3beec2b2743a888e256bb90f677a594 100644 (file)
@@ -497,7 +497,7 @@ static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
        tag->num_records = ncpus;
 
        i = 0;
-       for_each_cpu_mask(cpu, *mask) {
+       for_each_cpu(cpu, mask) {
                ent[i].cpu = cpu;
                ent[i].result = DR_CPU_RES_OK;
                ent[i].stat = default_stat;
@@ -534,7 +534,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
        int resp_len, ncpus, cpu;
        unsigned long flags;
 
-       ncpus = cpus_weight(*mask);
+       ncpus = cpumask_weight(mask);
        resp_len = dr_cpu_size_response(ncpus);
        resp = kzalloc(resp_len, GFP_KERNEL);
        if (!resp)
@@ -547,7 +547,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
        mdesc_populate_present_mask(mask);
        mdesc_fill_in_cpu_data(mask);
 
-       for_each_cpu_mask(cpu, *mask) {
+       for_each_cpu(cpu, mask) {
                int err;
 
                printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
@@ -593,7 +593,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
        int resp_len, ncpus, cpu;
        unsigned long flags;
 
-       ncpus = cpus_weight(*mask);
+       ncpus = cpumask_weight(mask);
        resp_len = dr_cpu_size_response(ncpus);
        resp = kzalloc(resp_len, GFP_KERNEL);
        if (!resp)
@@ -603,7 +603,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
                             resp_len, ncpus, mask,
                             DR_CPU_STAT_UNCONFIGURED);
 
-       for_each_cpu_mask(cpu, *mask) {
+       for_each_cpu(cpu, mask) {
                int err;
 
                printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
@@ -649,13 +649,13 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp,
 
        purge_dups(cpu_list, tag->num_records);
 
-       cpus_clear(mask);
+       cpumask_clear(&mask);
        for (i = 0; i < tag->num_records; i++) {
                if (cpu_list[i] == CPU_SENTINEL)
                        continue;
 
                if (cpu_list[i] < nr_cpu_ids)
-                       cpu_set(cpu_list[i], mask);
+                       cpumask_set_cpu(cpu_list[i], &mask);
        }
 
        if (tag->type == DR_CPU_CONFIGURE)
index 6da784a5612be01936a34a96a4706d95d2c12b03..8341963f4c84c10f5974edfa6f10af7db42754bd 100644 (file)
@@ -269,19 +269,22 @@ smp4m_ticker:
        /* Here is where we check for possible SMP IPI passed to us
         * on some level other than 15 which is the NMI and only used
         * for cross calls.  That has a separate entry point below.
+        *
+        * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
         */
 maybe_smp4m_msg:
        GET_PROCESSOR4M_ID(o3)
        sethi   %hi(sun4m_irq_percpu), %l5
        sll     %o3, 2, %o3
        or      %l5, %lo(sun4m_irq_percpu), %o5
-       sethi   %hi(0x40000000), %o2
+       sethi   %hi(0x70000000), %o2    ! Check all soft-IRQs
        ld      [%o5 + %o3], %o1
        ld      [%o1 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
        andcc   %o3, %o2, %g0
        be,a    smp4m_ticker
         cmp    %l7, 14
-       st      %o2, [%o1 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x40000000
+       /* Soft-IRQ IPI */
+       st      %o2, [%o1 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x70000000
        WRITE_PAUSE
        ld      [%o1 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
        WRITE_PAUSE
@@ -290,9 +293,27 @@ maybe_smp4m_msg:
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
-       call    smp_reschedule_irq
+       sll     %o2, 28, %o2            ! shift for simpler checks below
+maybe_smp4m_msg_check_single:
+       andcc   %o2, 0x1, %g0
+       beq,a   maybe_smp4m_msg_check_mask
+        andcc  %o2, 0x2, %g0
+       call    smp_call_function_single_interrupt
         nop
-
+       andcc   %o2, 0x2, %g0
+maybe_smp4m_msg_check_mask:
+       beq,a   maybe_smp4m_msg_check_resched
+        andcc  %o2, 0x4, %g0
+       call    smp_call_function_interrupt
+        nop
+       andcc   %o2, 0x4, %g0
+maybe_smp4m_msg_check_resched:
+       /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
+       beq,a   maybe_smp4m_msg_out
+        nop
+       call    smp_resched_interrupt
+        nop
+maybe_smp4m_msg_out:
        RESTORE_ALL
 
        .align  4
@@ -401,18 +422,18 @@ linux_trap_ipi15_sun4d:
 1:     b,a     1b
 
 #ifdef CONFIG_SPARC_LEON
-
-       .globl  smpleon_ticker
-       /* SMP per-cpu ticker interrupts are handled specially. */
-smpleon_ticker:
+       .globl  smpleon_ipi
+       .extern leon_ipi_interrupt
+       /* SMP per-cpu IPI interrupts are handled specially. */
+smpleon_ipi:
         SAVE_ALL
        or      %l0, PSR_PIL, %g2
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
-       call    leon_percpu_timer_interrupt
-        add    %sp, STACKFRAME_SZ, %o0
+       call    leonsmp_ipi_interrupt
+        add    %sp, STACKFRAME_SZ, %o1 ! pt_regs
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE
        RESTORE_ALL
index 59423491cef85211581bf81b761d3796185cf0c1..58778575983847d37b60ba88c9de2c7cd2883c63 100644 (file)
@@ -810,30 +810,24 @@ found_version:
 got_prop:
 #ifdef CONFIG_SPARC_LEON
                /* no cpu-type check is needed, it is a SPARC-LEON */
-#ifdef CONFIG_SMP
-               ba leon_smp_init
-                nop
 
-               .global leon_smp_init
-leon_smp_init:
-               sethi   %hi(boot_cpu_id), %g1    ! master always 0
-               stb     %g0, [%g1 + %lo(boot_cpu_id)]
-               sethi   %hi(boot_cpu_id4), %g1   ! master always 0
-               stb     %g0, [%g1 + %lo(boot_cpu_id4)]
+               sethi   %hi(boot_cpu_id), %g2   ! boot-cpu index
 
-               rd     %asr17,%g1
-               srl    %g1,28,%g1
+#ifdef CONFIG_SMP
+               ldub    [%g2 + %lo(boot_cpu_id)], %g1
+               cmp     %g1, 0xff               ! unset means first CPU
+               bne     leon_smp_cpu_startup    ! continue only with master
+                nop
+#endif
+               /* Get CPU-ID from most significant 4-bit of ASR17 */
+               rd     %asr17, %g1
+               srl    %g1, 28, %g1
 
-               cmp %g0,%g1
-                beq sun4c_continue_boot         !continue with master
-               nop
+               /* Update boot_cpu_id only on boot cpu */
+               stub    %g1, [%g2 + %lo(boot_cpu_id)]
 
-               ba leon_smp_cpu_startup
-                nop
-#else
                ba sun4c_continue_boot
                 nop
-#endif
 #endif
                set     cputypval, %o2
                ldub    [%o2 + 0x4], %l1
@@ -893,9 +887,6 @@ sun4d_init:
        sta     %g4, [%g0] ASI_M_VIKING_TMP1
        sethi   %hi(boot_cpu_id), %g5
        stb     %g4, [%g5 + %lo(boot_cpu_id)]
-       sll     %g4, 2, %g4
-       sethi   %hi(boot_cpu_id4), %g5
-       stb     %g4, [%g5 + %lo(boot_cpu_id4)]
 #endif
 
        /* Fall through to sun4m_init */
@@ -1024,14 +1015,28 @@ sun4c_continue_boot:
                bl      1b
                 add    %o0, 0x1, %o0
 
+               /* If boot_cpu_id has not been setup by machine specific
+                * init-code above we default it to zero.
+                */
+               sethi   %hi(boot_cpu_id), %g2
+               ldub    [%g2 + %lo(boot_cpu_id)], %g3
+               cmp     %g3, 0xff
+               bne     1f
+                nop
+               mov     %g0, %g3
+               stub    %g3, [%g2 + %lo(boot_cpu_id)]
+
+1:             /* boot_cpu_id set. calculate boot_cpu_id4 = boot_cpu_id*4 */
+               sll     %g3, 2, %g3
+               sethi   %hi(boot_cpu_id4), %g2
+               stub    %g3, [%g2 + %lo(boot_cpu_id4)]
+
                /* Initialize the uwinmask value for init task just in case.
                 * But first make current_set[boot_cpu_id] point to something useful.
                 */
                set     init_thread_union, %g6
                set     current_set, %g2
 #ifdef CONFIG_SMP
-               sethi   %hi(boot_cpu_id4), %g3
-               ldub    [%g3 + %lo(boot_cpu_id4)], %g3
                st      %g6, [%g2]
                add     %g2, %g3, %g2
 #endif
index c6ce9a6a47902bc8a5339bf77ecb08b111a72ea0..1c9c80a1a86af87611d4b77cc8b5a2c5244d1574 100644 (file)
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
+/* This function must make sure that caches and memory are coherent after DMA
+ * On LEON systems without cache snooping it flushes the entire D-CACHE.
+ */
 #ifndef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l)       /* Anton pulled it out for 2.4.0-xx */
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
+{
+}
 #else
-static inline void mmu_inval_dma_area(void *va, unsigned long len)
+static inline void dma_make_coherent(unsigned long pa, unsigned long len)
 {
        if (!sparc_leon3_snooping_enabled())
                leon_flush_dcache_all();
@@ -284,7 +289,6 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
                printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
                goto err_nova;
        }
-       mmu_inval_dma_area((void *)va, len_total);
 
        // XXX The mmu_map_dma_area does this for us below, see comments.
        // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
@@ -336,7 +340,6 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
        release_resource(res);
        kfree(res);
 
-       /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
        pgv = virt_to_page(p);
        mmu_unmap_dma_area(dev, ba, n);
 
@@ -463,7 +466,6 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
                printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
                goto err_nova;
        }
-       mmu_inval_dma_area(va, len_total);
        sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
        *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
@@ -489,7 +491,6 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
                                dma_addr_t ba)
 {
        struct resource *res;
-       void *pgp;
 
        if ((res = _sparc_find_resource(&_sparc_dvma,
            (unsigned long)p)) == NULL) {
@@ -509,14 +510,12 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
                return;
        }
 
-       pgp = phys_to_virt(ba); /* bus_to_virt actually */
-       mmu_inval_dma_area(pgp, n);
+       dma_make_coherent(ba, n);
        sparc_unmapiorange((unsigned long)p, n);
 
        release_resource(res);
        kfree(res);
-
-       free_pages((unsigned long)pgp, get_order(n));
+       free_pages((unsigned long)phys_to_virt(ba), get_order(n));
 }
 
 /*
@@ -535,7 +534,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
                             enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        if (dir != PCI_DMA_TODEVICE)
-               mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
+               dma_make_coherent(ba, PAGE_ALIGN(size));
 }
 
 /* Map a set of buffers described by scatterlist in streaming
@@ -562,8 +561,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
 
        /* IIep is write-through, not flushing. */
        for_each_sg(sgl, sg, nents, n) {
-               BUG_ON(page_address(sg_page(sg)) == NULL);
-               sg->dma_address = virt_to_phys(sg_virt(sg));
+               sg->dma_address = sg_phys(sg);
                sg->dma_length = sg->length;
        }
        return nents;
@@ -582,9 +580,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(page_address(sg_page(sg)),
-                                          PAGE_ALIGN(sg->length));
+                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
@@ -603,8 +599,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
                                      size_t size, enum dma_data_direction dir)
 {
        if (dir != PCI_DMA_TODEVICE) {
-               mmu_inval_dma_area(phys_to_virt(ba),
-                                  PAGE_ALIGN(size));
+               dma_make_coherent(ba, PAGE_ALIGN(size));
        }
 }
 
@@ -612,8 +607,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
                                         size_t size, enum dma_data_direction dir)
 {
        if (dir != PCI_DMA_TODEVICE) {
-               mmu_inval_dma_area(phys_to_virt(ba),
-                                  PAGE_ALIGN(size));
+               dma_make_coherent(ba, PAGE_ALIGN(size));
        }
 }
 
@@ -631,9 +625,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(page_address(sg_page(sg)),
-                                          PAGE_ALIGN(sg->length));
+                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
@@ -646,9 +638,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
 
        if (dir != PCI_DMA_TODEVICE) {
                for_each_sg(sgl, sg, nents, n) {
-                       BUG_ON(page_address(sg_page(sg)) == NULL);
-                       mmu_inval_dma_area(page_address(sg_page(sg)),
-                                          PAGE_ALIGN(sg->length));
+                       dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
                }
        }
 }
index 008453b798ecc3fdae62e535b548e94b919beb18..100b9c204e78f5143c13ae9beb3a6256d28165e5 100644 (file)
@@ -2,6 +2,23 @@
 
 #include <asm/btfixup.h>
 
+struct irq_bucket {
+        struct irq_bucket *next;
+        unsigned int real_irq;
+        unsigned int irq;
+        unsigned int pil;
+};
+
+#define SUN4D_MAX_BOARD 10
+#define SUN4D_MAX_IRQ ((SUN4D_MAX_BOARD + 2) << 5)
+
+/* Map between the irq identifier used in hw to the
+ * irq_bucket. The map is sufficiently large to hold
+ * the sun4d hw identifiers.
+ */
+extern struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
+
+
 /* sun4m specific type definitions */
 
 /* This maps direct to CPU specific interrupt registers */
@@ -35,6 +52,10 @@ struct sparc_irq_config {
 };
 extern struct sparc_irq_config sparc_irq_config;
 
+unsigned int irq_alloc(unsigned int real_irq, unsigned int pil);
+void irq_link(unsigned int irq);
+void irq_unlink(unsigned int irq);
+void handler_irq(unsigned int pil, struct pt_regs *regs);
 
 /* Dave Redman (djhr@tadpole.co.uk)
  * changed these to function pointers.. it saves cycles and will allow
@@ -44,33 +65,9 @@ extern struct sparc_irq_config sparc_irq_config;
  * Changed these to btfixup entities... It saves cycles :)
  */
 
-BTFIXUPDEF_CALL(void, disable_irq, unsigned int)
-BTFIXUPDEF_CALL(void, enable_irq, unsigned int)
-BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int)
-BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int)
 BTFIXUPDEF_CALL(void, clear_clock_irq, void)
 BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
 
-static inline void __disable_irq(unsigned int irq)
-{
-       BTFIXUP_CALL(disable_irq)(irq);
-}
-
-static inline void __enable_irq(unsigned int irq)
-{
-       BTFIXUP_CALL(enable_irq)(irq);
-}
-
-static inline void disable_pil_irq(unsigned int irq)
-{
-       BTFIXUP_CALL(disable_pil_irq)(irq);
-}
-
-static inline void enable_pil_irq(unsigned int irq)
-{
-       BTFIXUP_CALL(enable_pil_irq)(irq);
-}
-
 static inline void clear_clock_irq(void)
 {
        BTFIXUP_CALL(clear_clock_irq)();
@@ -89,4 +86,10 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
 #define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
 #define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
 #define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
+
+/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
+#define SUN4D_IPI_IRQ 14
+
+extern void sun4d_ipi_interrupt(void);
+
 #endif
index 7c93df4099cb168f0808815d19d69f47ab3ccc92..9b89d842913c0da9166be2f07e9cc4c330d40ea9 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cpudata.h>
 #include <asm/pcic.h>
 #include <asm/leon.h>
 
@@ -101,284 +102,173 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * directed CPU interrupts using the existing enable/disable irq code
  * with tweaks.
  *
+ * Sun4d complicates things even further.  IRQ numbers are arbitrary
+ * 32-bit values in that case.  Since this is similar to sparc64,
+ * we adopt a virtual IRQ numbering scheme as is done there.
+ * Virtual interrupt numbers are allocated by build_irq().  So NR_IRQS
+ * just becomes a limit of how many interrupt sources we can handle in
+ * a single system.  Even fully loaded SS2000 machines top off at
+ * about 32 interrupt sources or so, therefore a NR_IRQS value of 64
+ * is more than enough.
+ *
+ * We keep a map of per-PIL enable interrupts.  These get wired
+ * up via the irq_chip->startup() method which gets invoked by
+ * the generic IRQ layer during request_irq().
  */
 
 
+/* Table of allocated irqs. Unused entries has irq == 0 */
+static struct irq_bucket irq_table[NR_IRQS];
+/* Protect access to irq_table */
+static DEFINE_SPINLOCK(irq_table_lock);
 
-/*
- * Dave Redman (djhr@tadpole.co.uk)
- *
- * There used to be extern calls and hard coded values here.. very sucky!
- * instead, because some of the devices attach very early, I do something
- * equally sucky but at least we'll never try to free statically allocated
- * space or call kmalloc before kmalloc_init :(.
- *
- * In fact it's the timer10 that attaches first.. then timer14
- * then kmalloc_init is called.. then the tty interrupts attach.
- * hmmm....
- *
- */
-#define MAX_STATIC_ALLOC       4
-struct irqaction static_irqaction[MAX_STATIC_ALLOC];
-int static_irq_count;
-
-static struct {
-       struct irqaction *action;
-       int flags;
-} sparc_irq[NR_IRQS];
-#define SPARC_IRQ_INPROGRESS 1
-
-/* Used to protect the IRQ action lists */
-DEFINE_SPINLOCK(irq_action_lock);
+/* Map between the irq identifier used in hw to the irq_bucket. */
+struct irq_bucket *irq_map[SUN4D_MAX_IRQ];
+/* Protect access to irq_map */
+static DEFINE_SPINLOCK(irq_map_lock);
 
-int show_interrupts(struct seq_file *p, void *v)
+/* Allocate a new irq from the irq_table */
+unsigned int irq_alloc(unsigned int real_irq, unsigned int pil)
 {
-       int i = *(loff_t *)v;
-       struct irqaction *action;
        unsigned long flags;
-#ifdef CONFIG_SMP
-       int j;
-#endif
+       unsigned int i;
+
+       spin_lock_irqsave(&irq_table_lock, flags);
+       for (i = 1; i < NR_IRQS; i++) {
+               if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil)
+                       goto found;
+       }
 
-       if (sparc_cpu_model == sun4d)
-               return show_sun4d_interrupts(p, v);
+       for (i = 1; i < NR_IRQS; i++) {
+               if (!irq_table[i].irq)
+                       break;
+       }
 
-       spin_lock_irqsave(&irq_action_lock, flags);
        if (i < NR_IRQS) {
-               action = sparc_irq[i].action;
-               if (!action)
-                       goto out_unlock;
-               seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
-               seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-               for_each_online_cpu(j) {
-                       seq_printf(p, "%10u ",
-                                   kstat_cpu(j).irqs[i]);
-               }
-#endif
-               seq_printf(p, " %c %s",
-                       (action->flags & IRQF_DISABLED) ? '+' : ' ',
-                       action->name);
-               for (action = action->next; action; action = action->next) {
-                       seq_printf(p, ",%s %s",
-                               (action->flags & IRQF_DISABLED) ? " +" : "",
-                               action->name);
-               }
-               seq_putc(p, '\n');
+               irq_table[i].real_irq = real_irq;
+               irq_table[i].irq = i;
+               irq_table[i].pil = pil;
+       } else {
+               printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+               i = 0;
        }
-out_unlock:
-       spin_unlock_irqrestore(&irq_action_lock, flags);
-       return 0;
+found:
+       spin_unlock_irqrestore(&irq_table_lock, flags);
+
+       return i;
 }
 
-void free_irq(unsigned int irq, void *dev_id)
+/* Based on a single pil handler_irq may need to call several
+ * interrupt handlers. Use irq_map as entry to irq_table,
+ * and let each entry in irq_table point to the next entry.
+ */
+void irq_link(unsigned int irq)
 {
-       struct irqaction *action;
-       struct irqaction **actionp;
+       struct irq_bucket *p;
        unsigned long flags;
-       unsigned int cpu_irq;
-
-       if (sparc_cpu_model == sun4d) {
-               sun4d_free_irq(irq, dev_id);
-               return;
-       }
-       cpu_irq = irq & (NR_IRQS - 1);
-       if (cpu_irq > 14) {  /* 14 irq levels on the sparc */
-               printk(KERN_ERR "Trying to free bogus IRQ %d\n", irq);
-               return;
-       }
+       unsigned int pil;
 
-       spin_lock_irqsave(&irq_action_lock, flags);
+       BUG_ON(irq >= NR_IRQS);
 
-       actionp = &sparc_irq[cpu_irq].action;
-       action = *actionp;
+       spin_lock_irqsave(&irq_map_lock, flags);
 
-       if (!action->handler) {
-               printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
-               goto out_unlock;
-       }
-       if (dev_id) {
-               for (; action; action = action->next) {
-                       if (action->dev_id == dev_id)
-                               break;
-                       actionp = &action->next;
-               }
-               if (!action) {
-                       printk(KERN_ERR "Trying to free free shared IRQ%d\n",
-                              irq);
-                       goto out_unlock;
-               }
-       } else if (action->flags & IRQF_SHARED) {
-               printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
-                      irq);
-               goto out_unlock;
-       }
-       if (action->flags & SA_STATIC_ALLOC) {
-               /*
-                * This interrupt is marked as specially allocated
-                * so it is a bad idea to free it.
-                */
-               printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
-                      irq, action->name);
-               goto out_unlock;
-       }
-
-       *actionp = action->next;
+       p = &irq_table[irq];
+       pil = p->pil;
+       BUG_ON(pil > SUN4D_MAX_IRQ);
+       p->next = irq_map[pil];
+       irq_map[pil] = p;
 
-       spin_unlock_irqrestore(&irq_action_lock, flags);
+       spin_unlock_irqrestore(&irq_map_lock, flags);
+}
 
-       synchronize_irq(irq);
+void irq_unlink(unsigned int irq)
+{
+       struct irq_bucket *p, **pnext;
+       unsigned long flags;
 
-       spin_lock_irqsave(&irq_action_lock, flags);
+       BUG_ON(irq >= NR_IRQS);
 
-       kfree(action);
+       spin_lock_irqsave(&irq_map_lock, flags);
 
-       if (!sparc_irq[cpu_irq].action)
-               __disable_irq(irq);
+       p = &irq_table[irq];
+       BUG_ON(p->pil > SUN4D_MAX_IRQ);
+       pnext = &irq_map[p->pil];
+       while (*pnext != p)
+               pnext = &(*pnext)->next;
+       *pnext = p->next;
 
-out_unlock:
-       spin_unlock_irqrestore(&irq_action_lock, flags);
+       spin_unlock_irqrestore(&irq_map_lock, flags);
 }
-EXPORT_SYMBOL(free_irq);
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
-       unsigned int cpu_irq;
 
-       cpu_irq = irq & (NR_IRQS - 1);
-       while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
-               cpu_relax();
-}
-EXPORT_SYMBOL(synchronize_irq);
-#endif /* SMP */
 
-void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
+/* /proc/interrupts printing */
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-       int i;
-       struct irqaction *action;
-       unsigned int cpu_irq;
+       int j;
 
-       cpu_irq = irq & (NR_IRQS - 1);
-       action = sparc_irq[cpu_irq].action;
-
-       printk(KERN_ERR "IO device interrupt, irq = %d\n", irq);
-       printk(KERN_ERR "PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
-                   regs->npc, regs->u_regs[14]);
-       if (action) {
-               printk(KERN_ERR "Expecting: ");
-               for (i = 0; i < 16; i++)
-                       if (action->handler)
-                               printk(KERN_CONT "[%s:%d:0x%x] ", action->name,
-                                      i, (unsigned int)action->handler);
-       }
-       printk(KERN_ERR "AIEEE\n");
-       panic("bogus interrupt received");
+#ifdef CONFIG_SMP
+       seq_printf(p, "RES: ");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
+       seq_printf(p, "     IPI rescheduling interrupts\n");
+       seq_printf(p, "CAL: ");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
+       seq_printf(p, "     IPI function call interrupts\n");
+#endif
+       seq_printf(p, "NMI: ");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", cpu_data(j).counter);
+       seq_printf(p, "     Non-maskable interrupts\n");
+       return 0;
 }
 
-void handler_irq(int pil, struct pt_regs *regs)
+void handler_irq(unsigned int pil, struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
-       struct irqaction *action;
-       int cpu = smp_processor_id();
+       struct irq_bucket *p;
 
+       BUG_ON(pil > 15);
        old_regs = set_irq_regs(regs);
        irq_enter();
-       disable_pil_irq(pil);
-#ifdef CONFIG_SMP
-       /* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
-       if ((sparc_cpu_model==sun4m) && (pil < 10))
-               smp4m_irq_rotate(cpu);
-#endif
-       action = sparc_irq[pil].action;
-       sparc_irq[pil].flags |= SPARC_IRQ_INPROGRESS;
-       kstat_cpu(cpu).irqs[pil]++;
-       do {
-               if (!action || !action->handler)
-                       unexpected_irq(pil, NULL, regs);
-               action->handler(pil, action->dev_id);
-               action = action->next;
-       } while (action);
-       sparc_irq[pil].flags &= ~SPARC_IRQ_INPROGRESS;
-       enable_pil_irq(pil);
+
+       p = irq_map[pil];
+       while (p) {
+               struct irq_bucket *next = p->next;
+
+               generic_handle_irq(p->irq);
+               p = next;
+       }
        irq_exit();
        set_irq_regs(old_regs);
 }
 
 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
+static unsigned int floppy_irq;
 
-/*
- * Fast IRQs on the Sparc can only have one routine attached to them,
- * thus no sharing possible.
- */
-static int request_fast_irq(unsigned int irq,
-                           void (*handler)(void),
-                           unsigned long irqflags, const char *devname)
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
 {
-       struct irqaction *action;
-       unsigned long flags;
        unsigned int cpu_irq;
-       int ret;
+       int err;
+
 #if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
        struct tt_entry *trap_table;
 #endif
-       cpu_irq = irq & (NR_IRQS - 1);
-       if (cpu_irq > 14) {
-               ret = -EINVAL;
-               goto out;
-       }
-       if (!handler) {
-               ret = -EINVAL;
-               goto out;
-       }
 
-       spin_lock_irqsave(&irq_action_lock, flags);
+       err = request_irq(irq, irq_handler, 0, "floppy", NULL);
+       if (err)
+               return -1;
 
-       action = sparc_irq[cpu_irq].action;
-       if (action) {
-               if (action->flags & IRQF_SHARED)
-                       panic("Trying to register fast irq when already shared.\n");
-               if (irqflags & IRQF_SHARED)
-                       panic("Trying to register fast irq as shared.\n");
+       /* Save for later use in floppy interrupt handler */
+       floppy_irq = irq;
 
-               /* Anyway, someone already owns it so cannot be made fast. */
-               printk(KERN_ERR "request_fast_irq: Trying to register yet already owned.\n");
-               ret = -EBUSY;
-               goto out_unlock;
-       }
-
-       /*
-        * If this is flagged as statically allocated then we use our
-        * private struct which is never freed.
-        */
-       if (irqflags & SA_STATIC_ALLOC) {
-               if (static_irq_count < MAX_STATIC_ALLOC)
-                       action = &static_irqaction[static_irq_count++];
-               else
-                       printk(KERN_ERR "Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
-                              irq, devname);
-       }
-
-       if (action == NULL)
-               action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
-       if (!action) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       cpu_irq = (irq & (NR_IRQS - 1));
 
        /* Dork with trap table if we get this far. */
 #define INSTANTIATE(table) \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
-               SPARC_BRANCH((unsigned long) handler, \
+               SPARC_BRANCH((unsigned long) floppy_hardint, \
                             (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
@@ -399,22 +289,9 @@ static int request_fast_irq(unsigned int irq,
         * writing we have no CPU-neutral interface to fine-grained flushes.
         */
        flush_cache_all();
-
-       action->flags = irqflags;
-       action->name = devname;
-       action->dev_id = NULL;
-       action->next = NULL;
-
-       sparc_irq[cpu_irq].action = action;
-
-       __enable_irq(irq);
-
-       ret = 0;
-out_unlock:
-       spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
-       return ret;
+       return 0;
 }
+EXPORT_SYMBOL(sparc_floppy_request_irq);
 
 /*
  * These variables are used to access state from the assembler
@@ -440,154 +317,23 @@ EXPORT_SYMBOL(pdma_base);
 unsigned long pdma_areasize;
 EXPORT_SYMBOL(pdma_areasize);
 
-static irq_handler_t floppy_irq_handler;
-
+/* Use the generic irq support to call floppy_interrupt
+ * which was setup using request_irq() in sparc_floppy_request_irq().
+ * We only have one floppy interrupt so we do not need to check
+ * for additional handlers being wired up by irq_link()
+ */
 void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
-       int cpu = smp_processor_id();
 
        old_regs = set_irq_regs(regs);
-       disable_pil_irq(irq);
        irq_enter();
-       kstat_cpu(cpu).irqs[irq]++;
-       floppy_irq_handler(irq, dev_id);
+       generic_handle_irq(floppy_irq);
        irq_exit();
-       enable_pil_irq(irq);
        set_irq_regs(old_regs);
-       /*
-        * XXX Eek, it's totally changed with preempt_count() and such
-        * if (softirq_pending(cpu))
-        *      do_softirq();
-        */
-}
-
-int sparc_floppy_request_irq(int irq, unsigned long flags,
-                            irq_handler_t irq_handler)
-{
-       floppy_irq_handler = irq_handler;
-       return request_fast_irq(irq, floppy_hardint, flags, "floppy");
 }
-EXPORT_SYMBOL(sparc_floppy_request_irq);
-
 #endif
 
-int request_irq(unsigned int irq,
-               irq_handler_t handler,
-               unsigned long irqflags, const char *devname, void *dev_id)
-{
-       struct irqaction *action, **actionp;
-       unsigned long flags;
-       unsigned int cpu_irq;
-       int ret;
-
-       if (sparc_cpu_model == sun4d)
-               return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
-
-       cpu_irq = irq & (NR_IRQS - 1);
-       if (cpu_irq > 14) {
-               ret = -EINVAL;
-               goto out;
-       }
-       if (!handler) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       spin_lock_irqsave(&irq_action_lock, flags);
-
-       actionp = &sparc_irq[cpu_irq].action;
-       action = *actionp;
-       if (action) {
-               if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
-                       ret = -EBUSY;
-                       goto out_unlock;
-               }
-               if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
-                       printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
-                              irq);
-                       ret = -EBUSY;
-                       goto out_unlock;
-               }
-               for ( ; action; action = *actionp)
-                       actionp = &action->next;
-       }
-
-       /* If this is flagged as statically allocated then we use our
-        * private struct which is never freed.
-        */
-       if (irqflags & SA_STATIC_ALLOC) {
-               if (static_irq_count < MAX_STATIC_ALLOC)
-                       action = &static_irqaction[static_irq_count++];
-               else
-                       printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
-                              irq, devname);
-       }
-       if (action == NULL)
-               action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
-       if (!action) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       action->handler = handler;
-       action->flags = irqflags;
-       action->name = devname;
-       action->next = NULL;
-       action->dev_id = dev_id;
-
-       *actionp = action;
-
-       __enable_irq(irq);
-
-       ret = 0;
-out_unlock:
-       spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
-       return ret;
-}
-EXPORT_SYMBOL(request_irq);
-
-void disable_irq_nosync(unsigned int irq)
-{
-       __disable_irq(irq);
-}
-EXPORT_SYMBOL(disable_irq_nosync);
-
-void disable_irq(unsigned int irq)
-{
-       __disable_irq(irq);
-}
-EXPORT_SYMBOL(disable_irq);
-
-void enable_irq(unsigned int irq)
-{
-       __enable_irq(irq);
-}
-EXPORT_SYMBOL(enable_irq);
-
-/*
- * We really don't need these at all on the Sparc.  We only have
- * stubs here because they are exported to modules.
- */
-unsigned long probe_irq_on(void)
-{
-       return 0;
-}
-EXPORT_SYMBOL(probe_irq_on);
-
-int probe_irq_off(unsigned long mask)
-{
-       return 0;
-}
-EXPORT_SYMBOL(probe_irq_off);
-
-static unsigned int build_device_irq(struct platform_device *op,
-                                     unsigned int real_irq)
-{
-       return real_irq;
-}
-
 /* djhr
  * This could probably be made indirect too and assigned in the CPU
  * bits of the code. That would be much nicer I think and would also
@@ -598,8 +344,6 @@ static unsigned int build_device_irq(struct platform_device *op,
 
 void __init init_IRQ(void)
 {
-       sparc_irq_config.build_device_irq = build_device_irq;
-
        switch (sparc_cpu_model) {
        case sun4c:
        case sun4:
@@ -607,14 +351,11 @@ void __init init_IRQ(void)
                break;
 
        case sun4m:
-#ifdef CONFIG_PCI
                pcic_probe();
-               if (pcic_present()) {
+               if (pcic_present())
                        sun4m_pci_init_IRQ();
-                       break;
-               }
-#endif
-               sun4m_init_IRQ();
+               else
+                       sun4m_init_IRQ();
                break;
 
        case sun4d:
@@ -632,9 +373,3 @@ void __init init_IRQ(void)
        btfixup();
 }
 
-#ifdef CONFIG_PROC_FS
-void init_irq_proc(void)
-{
-       /* For now, nothing... */
-}
-#endif /* CONFIG_PROC_FS */
index b1d275ce34352fa0f8c1dc136e047614cebe9e82..4e78862d12fd82aaab5359385503e0168465afcd 100644 (file)
@@ -224,13 +224,13 @@ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
        int cpuid;
 
        cpumask_copy(&mask, affinity);
-       if (cpus_equal(mask, cpu_online_map)) {
+       if (cpumask_equal(&mask, cpu_online_mask)) {
                cpuid = map_to_cpu(irq);
        } else {
                cpumask_t tmp;
 
-               cpus_and(tmp, cpu_online_map, mask);
-               cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
+               cpumask_and(&tmp, cpu_online_mask, &mask);
+               cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp);
        }
 
        return cpuid;
index 24ad449886be4ed7ce378eb362e73df312982921..6f6544cfa0ef589cf40da84b22bb867148af1e3a 100644 (file)
@@ -6,11 +6,9 @@
 #include <asm/traps.h>
 
 /* cpu.c */
-extern const char *sparc_cpu_type;
 extern const char *sparc_pmu_type;
-extern const char *sparc_fpu_type;
-
 extern unsigned int fsr_storage;
+extern int ncpus_probed;
 
 #ifdef CONFIG_SPARC32
 /* cpu.c */
@@ -37,6 +35,7 @@ extern void sun4c_init_IRQ(void);
 extern unsigned int lvl14_resolution;
 
 extern void sun4m_init_IRQ(void);
+extern void sun4m_unmask_profile_irq(void);
 extern void sun4m_clear_profile_irq(int cpu);
 
 /* sun4d_irq.c */
index 2969f777fa11fffe74f7a013bba60cdced4ae31c..2f538ac2e139b68c5ffea31b12520c0672b83a88 100644 (file)
 #include <asm/leon_amba.h>
 #include <asm/traps.h>
 #include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/setup.h>
 
 #include "prom.h"
 #include "irq.h"
 
 struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
 struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */
-struct amba_apb_device leon_percpu_timer_dev[16];
 
 int leondebug_irq_disable;
 int leon_debug_irqout;
 static int dummy_master_l10_counter;
 unsigned long amba_system_id;
+static DEFINE_SPINLOCK(leon_irq_lock);
 
 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
 unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
+int leon3_ticker_irq; /* Timer ticker IRQ */
 unsigned int sparc_leon_eirq;
-#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0]))
+#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
+#define LEON_IACK (&leon3_irqctrl_regs->iclear)
+#define LEON_DO_ACK_HW 1
 
-/* Return the IRQ of the pending IRQ on the extended IRQ controller */
-int sparc_leon_eirq_get(int eirq, int cpu)
+/* Return the last ACKed IRQ by the Extended IRQ controller. It has already
+ * been (automatically) ACKed when the CPU takes the trap.
+ */
+static inline unsigned int leon_eirq_get(int cpu)
 {
        return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
 }
 
-irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id)
+/* Handle one or multiple IRQs from the extended interrupt controller */
+static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
 {
-       printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n");
-       return IRQ_HANDLED;
+       unsigned int eirq;
+       int cpu = sparc_leon3_cpuid();
+
+       eirq = leon_eirq_get(cpu);
+       if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
+               generic_handle_irq(irq_map[eirq]->irq);
 }
 
 /* The extended IRQ controller has been found, this function registers it */
-void sparc_leon_eirq_register(int eirq)
+void leon_eirq_setup(unsigned int eirq)
 {
-       int irq;
+       unsigned long mask, oldmask;
+       unsigned int veirq;
 
-       /* Register a "BAD" handler for this interrupt, it should never happen */
-       irq = request_irq(eirq, sparc_leon_eirq_isr,
-                         (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL);
-
-       if (irq) {
-               printk(KERN_ERR
-                      "sparc_leon_eirq_register: unable to attach IRQ%d\n",
-                      eirq);
-       } else {
-               sparc_leon_eirq = eirq;
+       if (eirq < 1 || eirq > 0xf) {
+               printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
+               return;
        }
 
+       veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);
+
+       /*
+        * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ
+        * controller have a mask-bit of their own, so this is safe.
+        */
+       irq_link(veirq);
+       mask = 1 << eirq;
+       oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
+       LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));
+       sparc_leon_eirq = eirq;
 }
 
 static inline unsigned long get_irqmask(unsigned int irq)
@@ -83,35 +100,151 @@ static inline unsigned long get_irqmask(unsigned int irq)
        return mask;
 }
 
-static void leon_enable_irq(unsigned int irq_nr)
+#ifdef CONFIG_SMP
+static int irq_choose_cpu(const struct cpumask *affinity)
 {
-       unsigned long mask, flags;
-       mask = get_irqmask(irq_nr);
-       local_irq_save(flags);
-       LEON3_BYPASS_STORE_PA(LEON_IMASK,
-                             (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask)));
-       local_irq_restore(flags);
+       cpumask_t mask;
+
+       cpus_and(mask, cpu_online_map, *affinity);
+       if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
+               return boot_cpu_id;
+       else
+               return first_cpu(mask);
 }
+#else
+#define irq_choose_cpu(affinity) boot_cpu_id
+#endif
 
-static void leon_disable_irq(unsigned int irq_nr)
+static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
+                            bool force)
 {
-       unsigned long mask, flags;
-       mask = get_irqmask(irq_nr);
-       local_irq_save(flags);
-       LEON3_BYPASS_STORE_PA(LEON_IMASK,
-                             (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask)));
-       local_irq_restore(flags);
+       unsigned long mask, oldmask, flags;
+       int oldcpu, newcpu;
+
+       mask = (unsigned long)data->chip_data;
+       oldcpu = irq_choose_cpu(data->affinity);
+       newcpu = irq_choose_cpu(dest);
+
+       if (oldcpu == newcpu)
+               goto out;
+
+       /* unmask on old CPU first before enabling on the selected CPU */
+       spin_lock_irqsave(&leon_irq_lock, flags);
+       oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
+       LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
+       oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
+       LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
+       spin_unlock_irqrestore(&leon_irq_lock, flags);
+out:
+       return IRQ_SET_MASK_OK;
+}
+
+static void leon_unmask_irq(struct irq_data *data)
+{
+       unsigned long mask, oldmask, flags;
+       int cpu;
+
+       mask = (unsigned long)data->chip_data;
+       cpu = irq_choose_cpu(data->affinity);
+       spin_lock_irqsave(&leon_irq_lock, flags);
+       oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
+       LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
+       spin_unlock_irqrestore(&leon_irq_lock, flags);
+}
+
+static void leon_mask_irq(struct irq_data *data)
+{
+       unsigned long mask, oldmask, flags;
+       int cpu;
+
+       mask = (unsigned long)data->chip_data;
+       cpu = irq_choose_cpu(data->affinity);
+       spin_lock_irqsave(&leon_irq_lock, flags);
+       oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
+       LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
+       spin_unlock_irqrestore(&leon_irq_lock, flags);
+}
+
+static unsigned int leon_startup_irq(struct irq_data *data)
+{
+       irq_link(data->irq);
+       leon_unmask_irq(data);
+       return 0;
+}
 
+static void leon_shutdown_irq(struct irq_data *data)
+{
+       leon_mask_irq(data);
+       irq_unlink(data->irq);
+}
+
+/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */
+static void leon_eoi_irq(struct irq_data *data)
+{
+       unsigned long mask = (unsigned long)data->chip_data;
+
+       if (mask & LEON_DO_ACK_HW)
+               LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
+}
+
+static struct irq_chip leon_irq = {
+       .name                   = "leon",
+       .irq_startup            = leon_startup_irq,
+       .irq_shutdown           = leon_shutdown_irq,
+       .irq_mask               = leon_mask_irq,
+       .irq_unmask             = leon_unmask_irq,
+       .irq_eoi                = leon_eoi_irq,
+       .irq_set_affinity       = leon_set_affinity,
+};
+
+/*
+ * Build a LEON IRQ for the edge triggered LEON IRQ controller:
+ *  Edge (normal) IRQ           - handle_simple_irq, ack=DONT-CARE, never ack
+ *  Level IRQ (PCI|Level-GPIO)  - handle_fasteoi_irq, ack=1, ack after ISR
+ *  Per-CPU Edge                - handle_percpu_irq, ack=0
+ */
+unsigned int leon_build_device_irq(unsigned int real_irq,
+                                   irq_flow_handler_t flow_handler,
+                                   const char *name, int do_ack)
+{
+       unsigned int irq;
+       unsigned long mask;
+
+       irq = 0;
+       mask = get_irqmask(real_irq);
+       if (mask == 0)
+               goto out;
+
+       irq = irq_alloc(real_irq, real_irq);
+       if (irq == 0)
+               goto out;
+
+       if (do_ack)
+               mask |= LEON_DO_ACK_HW;
+
+       irq_set_chip_and_handler_name(irq, &leon_irq,
+                                     flow_handler, name);
+       irq_set_chip_data(irq, (void *)mask);
+
+out:
+       return irq;
+}
+
+static unsigned int _leon_build_device_irq(struct platform_device *op,
+                                          unsigned int real_irq)
+{
+       return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
 }
 
 void __init leon_init_timers(irq_handler_t counter_fn)
 {
-       int irq;
+       int irq, eirq;
        struct device_node *rootnp, *np, *nnp;
        struct property *pp;
        int len;
-       int cpu, icsel;
+       int icsel;
        int ampopts;
+       int err;
 
        leondebug_irq_disable = 0;
        leon_debug_irqout = 0;
@@ -173,98 +306,85 @@ void __init leon_init_timers(irq_handler_t counter_fn)
                        leon3_gptimer_irq = *(unsigned int *)pp->value;
        } while (0);
 
-       if (leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq) {
-               LEON3_BYPASS_STORE_PA(
-                       &leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
-               LEON3_BYPASS_STORE_PA(
-                       &leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
-                       (((1000000 / HZ) - 1)));
-               LEON3_BYPASS_STORE_PA(
+       if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
+               goto bad;
+
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
+                               (((1000000 / HZ) - 1)));
+       LEON3_BYPASS_STORE_PA(
                        &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
 
 #ifdef CONFIG_SMP
-               leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs;
-               leon_percpu_timer_dev[0].irq = leon3_gptimer_irq + 1 +
-                                              leon3_gptimer_idx;
-
-               if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
-                     (1<<LEON3_GPTIMER_SEPIRQ))) {
-                       prom_printf("irq timer not configured with separate irqs\n");
-                       BUG();
-               }
+       leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
 
-               LEON3_BYPASS_STORE_PA(
-                       &leon3_gptimer_regs->e[leon3_gptimer_idx+1].val, 0);
-               LEON3_BYPASS_STORE_PA(
-                       &leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
-                       (((1000000/HZ) - 1)));
-               LEON3_BYPASS_STORE_PA(
-                       &leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, 0);
-# endif
-
-               /*
-                * The IRQ controller may (if implemented) consist of multiple
-                * IRQ controllers, each mapped on a 4Kb boundary.
-                * Each CPU may be routed to different IRQCTRLs, however
-                * we assume that all CPUs (in SMP system) is routed to the
-                * same IRQ Controller, and for non-SMP only one IRQCTRL is
-                * accessed anyway.
-                * In AMP systems, Linux must run on CPU0 for the time being.
-                */
-               cpu = sparc_leon3_cpuid();
-               icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[cpu/8]);
-               icsel = (icsel >> ((7 - (cpu&0x7)) * 4)) & 0xf;
-               leon3_irqctrl_regs += icsel;
-       } else {
-               goto bad;
+       if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
+             (1<<LEON3_GPTIMER_SEPIRQ))) {
+               printk(KERN_ERR "timer not configured with separate irqs\n");
+               BUG();
        }
 
-       irq = request_irq(leon3_gptimer_irq+leon3_gptimer_idx,
-                         counter_fn,
-                         (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
+                               0);
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
+                               (((1000000/HZ) - 1)));
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+                               0);
+#endif
 
-       if (irq) {
-               printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n",
-                      LEON_INTERRUPT_TIMER1);
+       /*
+        * The IRQ controller may (if implemented) consist of multiple
+        * IRQ controllers, each mapped on a 4Kb boundary.
+        * Each CPU may be routed to different IRQCTRLs, however
+        * we assume that all CPUs (in SMP system) is routed to the
+        * same IRQ Controller, and for non-SMP only one IRQCTRL is
+        * accessed anyway.
+        * In AMP systems, Linux must run on CPU0 for the time being.
+        */
+       icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
+       icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
+       leon3_irqctrl_regs += icsel;
+
+       /* Mask all IRQs on boot-cpu IRQ controller */
+       LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);
+
+       /* Probe extended IRQ controller */
+       eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
+               >> 16) & 0xf;
+       if (eirq != 0)
+               leon_eirq_setup(eirq);
+
+       irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
+       err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
+       if (err) {
+               printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
                prom_halt();
        }
 
-# ifdef CONFIG_SMP
-       {
-               unsigned long flags;
-               struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_percpu_timer_dev[0].irq - 1)];
-
-               /* For SMP we use the level 14 ticker, however the bootup code
-                * has copied the firmwares level 14 vector into boot cpu's
-                * trap table, we must fix this now or we get squashed.
-                */
-               local_irq_save(flags);
-
-               patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
-
-               /* Adjust so that we jump directly to smpleon_ticker */
-               trap_table->inst_three += smpleon_ticker - real_irq_entry;
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+                             LEON3_GPTIMER_EN |
+                             LEON3_GPTIMER_RL |
+                             LEON3_GPTIMER_LD |
+                             LEON3_GPTIMER_IRQEN);
 
-               local_flush_cache_all();
-               local_irq_restore(flags);
+#ifdef CONFIG_SMP
+       /* Install per-cpu IRQ handler for broadcasted ticker */
+       irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
+                                   "per-cpu", 0);
+       err = request_irq(irq, leon_percpu_timer_interrupt,
+                         IRQF_PERCPU | IRQF_TIMER, "ticker",
+                         NULL);
+       if (err) {
+               printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
+               prom_halt();
        }
-# endif
-
-       if (leon3_gptimer_regs) {
-               LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
-                                     LEON3_GPTIMER_EN |
-                                     LEON3_GPTIMER_RL |
-                                     LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN);
 
-#ifdef CONFIG_SMP
-               LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-                                     LEON3_GPTIMER_EN |
-                                     LEON3_GPTIMER_RL |
-                                     LEON3_GPTIMER_LD |
-                                     LEON3_GPTIMER_IRQEN);
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+                             LEON3_GPTIMER_EN |
+                             LEON3_GPTIMER_RL |
+                             LEON3_GPTIMER_LD |
+                             LEON3_GPTIMER_IRQEN);
 #endif
-
-       }
        return;
 bad:
        printk(KERN_ERR "No Timer/irqctrl found\n");
@@ -281,9 +401,6 @@ void leon_load_profile_irq(int cpu, unsigned int limit)
        BUG();
 }
 
-
-
-
 void __init leon_trans_init(struct device_node *dp)
 {
        if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
@@ -337,22 +454,18 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
 {
        unsigned long mask, flags, *addr;
        mask = get_irqmask(irq_nr);
-       local_irq_save(flags);
-       addr = (unsigned long *)&(leon3_irqctrl_regs->mask[cpu]);
-       LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | (mask)));
-       local_irq_restore(flags);
+       spin_lock_irqsave(&leon_irq_lock, flags);
+       addr = (unsigned long *)LEON_IMASK(cpu);
+       LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
+       spin_unlock_irqrestore(&leon_irq_lock, flags);
 }
 
 #endif
 
 void __init leon_init_IRQ(void)
 {
-       sparc_irq_config.init_timers = leon_init_timers;
-
-       BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM);
+       sparc_irq_config.init_timers      = leon_init_timers;
+       sparc_irq_config.build_device_irq = _leon_build_device_irq;
 
        BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
                        BTFIXUPCALL_NORM);
index 8f5de4aa3c0ab2b8b7cd8c1fad1cf8636f4cb9fa..fe8fb44c609c57fc9fa3dd5eb4881a6c9e30731b 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/of.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
@@ -29,6 +30,7 @@
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
 #include <asm/irq_regs.h>
+#include <asm/traps.h>
 
 #include <asm/delay.h>
 #include <asm/irq.h>
 extern ctxd_t *srmmu_ctx_table_phys;
 static int smp_processors_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern unsigned char boot_cpu_id;
 extern cpumask_t smp_commenced_mask;
 void __init leon_configure_cache_smp(void);
+static void leon_ipi_init(void);
+
+/* IRQ number of LEON IPIs */
+int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT;
 
 static inline unsigned long do_swap(volatile unsigned long *ptr,
                                    unsigned long val)
@@ -94,8 +99,6 @@ void __cpuinit leon_callin(void)
        local_flush_cache_all();
        local_flush_tlb_all();
 
-       cpu_probe();
-
        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
                             : "memory" /* paranoid */);
@@ -104,11 +107,11 @@ void __cpuinit leon_callin(void)
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
 
-       while (!cpu_isset(cpuid, smp_commenced_mask))
+       while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                mb();
 
        local_irq_enable();
-       cpu_set(cpuid, cpu_online_map);
+       set_cpu_online(cpuid, true);
 }
 
 /*
@@ -179,13 +182,16 @@ void __init leon_boot_cpus(void)
        int nrcpu = leon_smp_nrcpus();
        int me = smp_processor_id();
 
+       /* Setup IPI */
+       leon_ipi_init();
+
        printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me,
               (unsigned int)nrcpu, (unsigned int)NR_CPUS,
               (unsigned int)&(leon3_irqctrl_regs->mpstatus));
 
        leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
        leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
-       leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, me);
+       leon_enable_irq_cpu(leon_ipi_irq, me);
 
        leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
 
@@ -220,6 +226,10 @@ int __cpuinit leon_boot_one_cpu(int i)
               (unsigned int)&leon3_irqctrl_regs->mpstatus);
        local_flush_cache_all();
 
+       /* Make sure all IRQs are of from the start for this new CPU */
+       LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
+
+       /* Wake one CPU */
        LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);
 
        /* wheee... it's going... */
@@ -236,7 +246,7 @@ int __cpuinit leon_boot_one_cpu(int i)
        } else {
                leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i);
                leon_enable_irq_cpu(LEON3_IRQ_TICKER, i);
-               leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, i);
+               leon_enable_irq_cpu(leon_ipi_irq, i);
        }
 
        local_flush_cache_all();
@@ -262,21 +272,21 @@ void __init leon_smp_done(void)
        local_flush_cache_all();
 
        /* Free unneeded trap tables */
-       if (!cpu_isset(1, cpu_present_map)) {
+       if (!cpu_present(1)) {
                ClearPageReserved(virt_to_page(&trapbase_cpu1));
                init_page_count(virt_to_page(&trapbase_cpu1));
                free_page((unsigned long)&trapbase_cpu1);
                totalram_pages++;
                num_physpages++;
        }
-       if (!cpu_isset(2, cpu_present_map)) {
+       if (!cpu_present(2)) {
                ClearPageReserved(virt_to_page(&trapbase_cpu2));
                init_page_count(virt_to_page(&trapbase_cpu2));
                free_page((unsigned long)&trapbase_cpu2);
                totalram_pages++;
                num_physpages++;
        }
-       if (!cpu_isset(3, cpu_present_map)) {
+       if (!cpu_present(3)) {
                ClearPageReserved(virt_to_page(&trapbase_cpu3));
                init_page_count(virt_to_page(&trapbase_cpu3));
                free_page((unsigned long)&trapbase_cpu3);
@@ -292,6 +302,99 @@ void leon_irq_rotate(int cpu)
 {
 }
 
+struct leon_ipi_work {
+       int single;
+       int msk;
+       int resched;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work);
+
+/* Initialize IPIs on the LEON, in order to save IRQ resources only one IRQ
+ * is used for all three types of IPIs.
+ */
+static void __init leon_ipi_init(void)
+{
+       int cpu, len;
+       struct leon_ipi_work *work;
+       struct property *pp;
+       struct device_node *rootnp;
+       struct tt_entry *trap_table;
+       unsigned long flags;
+
+       /* Find IPI IRQ or stick with default value */
+       rootnp = of_find_node_by_path("/ambapp0");
+       if (rootnp) {
+               pp = of_find_property(rootnp, "ipi_num", &len);
+               if (pp && (*(int *)pp->value))
+                       leon_ipi_irq = *(int *)pp->value;
+       }
+       printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq);
+
+       /* Adjust so that we jump directly to smpleon_ipi */
+       local_irq_save(flags);
+       trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
+       trap_table->inst_three += smpleon_ipi - real_irq_entry;
+       local_flush_cache_all();
+       local_irq_restore(flags);
+
+       for_each_possible_cpu(cpu) {
+               work = &per_cpu(leon_ipi_work, cpu);
+               work->single = work->msk = work->resched = 0;
+       }
+}
+
+static void leon_ipi_single(int cpu)
+{
+       struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
+
+       /* Mark work */
+       work->single = 1;
+
+       /* Generate IRQ on the CPU */
+       set_cpu_int(cpu, leon_ipi_irq);
+}
+
+static void leon_ipi_mask_one(int cpu)
+{
+       struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
+
+       /* Mark work */
+       work->msk = 1;
+
+       /* Generate IRQ on the CPU */
+       set_cpu_int(cpu, leon_ipi_irq);
+}
+
+static void leon_ipi_resched(int cpu)
+{
+       struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
+
+       /* Mark work */
+       work->resched = 1;
+
+       /* Generate IRQ on the CPU (any IRQ will cause resched) */
+       set_cpu_int(cpu, leon_ipi_irq);
+}
+
+void leonsmp_ipi_interrupt(void)
+{
+       struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);
+
+       if (work->single) {
+               work->single = 0;
+               smp_call_function_single_interrupt();
+       }
+       if (work->msk) {
+               work->msk = 0;
+               smp_call_function_interrupt();
+       }
+       if (work->resched) {
+               work->resched = 0;
+               smp_resched_interrupt();
+       }
+}
+
 static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
@@ -337,10 +440,10 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
                {
                        register int i;
 
-                       cpu_clear(smp_processor_id(), mask);
-                       cpus_and(mask, cpu_online_map, mask);
+                       cpumask_clear_cpu(smp_processor_id(), &mask);
+                       cpumask_and(&mask, cpu_online_mask, &mask);
                        for (i = 0; i <= high; i++) {
-                               if (cpu_isset(i, mask)) {
+                               if (cpumask_test_cpu(i, &mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
@@ -354,7 +457,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
                        i = 0;
                        do {
-                               if (!cpu_isset(i, mask))
+                               if (!cpumask_test_cpu(i, &mask))
                                        continue;
 
                                while (!ccall_info.processors_in[i])
@@ -363,7 +466,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
                        i = 0;
                        do {
-                               if (!cpu_isset(i, mask))
+                               if (!cpumask_test_cpu(i, &mask))
                                        continue;
 
                                while (!ccall_info.processors_out[i])
@@ -386,27 +489,23 @@ void leon_cross_call_irq(void)
        ccall_info.processors_out[i] = 1;
 }
 
-void leon_percpu_timer_interrupt(struct pt_regs *regs)
+irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused)
 {
-       struct pt_regs *old_regs;
        int cpu = smp_processor_id();
 
-       old_regs = set_irq_regs(regs);
-
        leon_clear_profile_irq(cpu);
 
        profile_tick(CPU_PROFILING);
 
        if (!--prof_counter(cpu)) {
-               int user = user_mode(regs);
+               int user = user_mode(get_irq_regs());
 
-               irq_enter();
                update_process_times(user);
-               irq_exit();
 
                prof_counter(cpu) = prof_multiplier(cpu);
        }
-       set_irq_regs(old_regs);
+
+       return IRQ_HANDLED;
 }
 
 static void __init smp_setup_percpu_timer(void)
@@ -449,6 +548,9 @@ void __init leon_init_smp(void)
        BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
                        BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_resched, leon_ipi_resched, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_single, leon_ipi_single, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_mask_one, leon_ipi_mask_one, BTFIXUPCALL_NORM);
 }
 
 #endif /* CONFIG_SPARC_LEON */
index 56db06432ce9fba3065e95ce26c9bc213a2f77d0..42f28c7420e1d99a1ac5abc3d44bc45a3e5fea54 100644 (file)
@@ -768,7 +768,7 @@ static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handl
                               cpuid, NR_CPUS);
                        continue;
                }
-               if (!cpu_isset(cpuid, *mask))
+               if (!cpumask_test_cpu(cpuid, mask))
                        continue;
 #endif
 
index 5c149689bb20c365144dd000ae2ece677ced82f7..3bb2eace58cf0b4ea5ec2f08b34228904b335074 100644 (file)
@@ -622,8 +622,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
 out:
        nid = of_node_to_nid(dp);
        if (nid != -1) {
-               cpumask_t numa_mask = *cpumask_of_node(nid);
+               cpumask_t numa_mask;
 
+               cpumask_copy(&numa_mask, cpumask_of_node(nid));
                irq_set_affinity(irq, &numa_mask);
        }
 
index 30982e9ab62669ae90ce804c977b3cdb7b20c6e8..580651af73f2bc4a39d78a9e538e66510aa315eb 100644 (file)
@@ -284,8 +284,9 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
 
        nid = pbm->numa_node;
        if (nid != -1) {
-               cpumask_t numa_mask = *cpumask_of_node(nid);
+               cpumask_t numa_mask;
 
+               cpumask_copy(&numa_mask, cpumask_of_node(nid));
                irq_set_affinity(irq, &numa_mask);
        }
        err = request_irq(irq, sparc64_msiq_interrupt, 0,
index 2cdc131b50acd04040762e9c814a1035d95087f1..948601a066ff9b5d6d53769a3fb0186770cbb2bd 100644 (file)
@@ -164,6 +164,9 @@ void __iomem *pcic_regs;
 volatile int pcic_speculative;
 volatile int pcic_trapped;
 
+/* forward */
+unsigned int pcic_build_device_irq(struct platform_device *op,
+                                   unsigned int real_irq);
 
 #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3))
 
@@ -523,6 +526,7 @@ static void
 pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
 {
        struct pcic_ca2irq *p;
+       unsigned int real_irq;
        int i, ivec;
        char namebuf[64];
 
@@ -551,26 +555,25 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
        i = p->pin;
        if (i >= 0 && i < 4) {
                ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
-               dev->irq = ivec >> (i << 2) & 0xF;
+               real_irq = ivec >> (i << 2) & 0xF;
        } else if (i >= 4 && i < 8) {
                ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
-               dev->irq = ivec >> ((i-4) << 2) & 0xF;
+               real_irq = ivec >> ((i-4) << 2) & 0xF;
        } else {                                        /* Corrupted map */
                printk("PCIC: BAD PIN %d\n", i); for (;;) {}
        }
 /* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */
 
-       /*
-        * dev->irq=0 means PROM did not bother to program the upper
+       /* real_irq means PROM did not bother to program the upper
         * half of PCIC. This happens on JS-E with PROM 3.11, for instance.
         */
-       if (dev->irq == 0 || p->force) {
+       if (real_irq == 0 || p->force) {
                if (p->irq == 0 || p->irq >= 15) {      /* Corrupted map */
                        printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {}
                }
                printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n",
                    p->irq, p->pin, dev->bus->number, dev->devfn);
-               dev->irq = p->irq;
+               real_irq = p->irq;
 
                i = p->pin;
                if (i >= 4) {
@@ -584,7 +587,8 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
                        ivec |= p->irq << (i << 2);
                        writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO);
                }
-       }
+       }
+       dev->irq = pcic_build_device_irq(NULL, real_irq);
 }
 
 /*
@@ -729,6 +733,7 @@ void __init pci_time_init(void)
        struct linux_pcic *pcic = &pcic0;
        unsigned long v;
        int timer_irq, irq;
+       int err;
 
        do_arch_gettimeoffset = pci_gettimeoffset;
 
@@ -740,9 +745,10 @@ void __init pci_time_init(void)
        timer_irq = PCI_COUNTER_IRQ_SYS(v);
        writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
                pcic->pcic_regs+PCI_COUNTER_IRQ);
-       irq = request_irq(timer_irq, pcic_timer_handler,
-                         (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
-       if (irq) {
+       irq = pcic_build_device_irq(NULL, timer_irq);
+       err = request_irq(irq, pcic_timer_handler,
+                         IRQF_TIMER, "timer", NULL);
+       if (err) {
                prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
                prom_halt();
        }
@@ -803,50 +809,73 @@ static inline unsigned long get_irqmask(int irq_nr)
        return 1 << irq_nr;
 }
 
-static void pcic_disable_irq(unsigned int irq_nr)
+static void pcic_mask_irq(struct irq_data *data)
 {
        unsigned long mask, flags;
 
-       mask = get_irqmask(irq_nr);
+       mask = (unsigned long)data->chip_data;
        local_irq_save(flags);
        writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
        local_irq_restore(flags);
 }
 
-static void pcic_enable_irq(unsigned int irq_nr)
+static void pcic_unmask_irq(struct irq_data *data)
 {
        unsigned long mask, flags;
 
-       mask = get_irqmask(irq_nr);
+       mask = (unsigned long)data->chip_data;
        local_irq_save(flags);
        writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
        local_irq_restore(flags);
 }
 
-static void pcic_load_profile_irq(int cpu, unsigned int limit)
+static unsigned int pcic_startup_irq(struct irq_data *data)
 {
-       printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
+       irq_link(data->irq);
+       pcic_unmask_irq(data);
+       return 0;
 }
 
-/* We assume the caller has disabled local interrupts when these are called,
- * or else very bizarre behavior will result.
- */
-static void pcic_disable_pil_irq(unsigned int pil)
+static struct irq_chip pcic_irq = {
+       .name           = "pcic",
+       .irq_startup    = pcic_startup_irq,
+       .irq_mask       = pcic_mask_irq,
+       .irq_unmask     = pcic_unmask_irq,
+};
+
+unsigned int pcic_build_device_irq(struct platform_device *op,
+                                   unsigned int real_irq)
 {
-       writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET);
+       unsigned int irq;
+       unsigned long mask;
+
+       irq = 0;
+       mask = get_irqmask(real_irq);
+       if (mask == 0)
+               goto out;
+
+       irq = irq_alloc(real_irq, real_irq);
+       if (irq == 0)
+               goto out;
+
+       irq_set_chip_and_handler_name(irq, &pcic_irq,
+                                     handle_level_irq, "PCIC");
+       irq_set_chip_data(irq, (void *)mask);
+
+out:
+       return irq;
 }
 
-static void pcic_enable_pil_irq(unsigned int pil)
+
+static void pcic_load_profile_irq(int cpu, unsigned int limit)
 {
-       writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR);
+       printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__);
 }
 
 void __init sun4m_pci_init_IRQ(void)
 {
-       BTFIXUPSET_CALL(enable_irq, pcic_enable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_irq, pcic_disable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(enable_pil_irq, pcic_enable_pil_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_pil_irq, pcic_disable_pil_irq, BTFIXUPCALL_NORM);
+       sparc_irq_config.build_device_irq = pcic_build_device_irq;
+
        BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM);
 }
index ee8426ede7c76ae5617ddb58a95d93a3f2c973ce..2cb0e1c001e254baea6895ad399776cec8c6c699 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/nmi.h>
 #include <asm/pcr.h>
 
+#include "kernel.h"
 #include "kstack.h"
 
 /* Sparc64 chips have two performance counters, 32-bits each, with
index 17529298c50a2e283ede3342830747fe54641384..c8cc461ff75f040a974179aa7383ba501dde017a 100644 (file)
@@ -128,8 +128,16 @@ void cpu_idle(void)
         set_thread_flag(TIF_POLLING_NRFLAG);
        /* endless idle loop with no priority at all */
        while(1) {
-               while (!need_resched())
-                       cpu_relax();
+#ifdef CONFIG_SPARC_LEON
+               if (pm_idle) {
+                       while (!need_resched())
+                               (*pm_idle)();
+               } else
+#endif
+               {
+                       while (!need_resched())
+                               cpu_relax();
+               }
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
index 05fb253305831095c660bb3aab077ac837f8f16e..5ce3d15a99b0fc13016ab143967a4aa1df8ba478 100644 (file)
@@ -326,7 +326,6 @@ void __init of_console_init(void)
                        of_console_options = NULL;
        }
 
-       prom_printf(msg, of_console_path);
        printk(msg, of_console_path);
 }
 
index 7b8b76c9557f2bc0ce0a5c37ec073a00fb5db8b7..3609bdee9ed294d1d6b88cea15fbc92a7ac2297a 100644 (file)
@@ -103,16 +103,20 @@ static unsigned int boot_flags __initdata = 0;
 /* Exported for mm/init.c:paging_init. */
 unsigned long cmdline_memory_size __initdata = 0;
 
+/* which CPU booted us (0xff = not set) */
+unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
+unsigned char boot_cpu_id4; /* boot_cpu_id << 2 */
+
 static void
 prom_console_write(struct console *con, const char *s, unsigned n)
 {
        prom_write(s, n);
 }
 
-static struct console prom_debug_console = {
-       .name =         "debug",
+static struct console prom_early_console = {
+       .name =         "earlyprom",
        .write =        prom_console_write,
-       .flags =        CON_PRINTBUFFER,
+       .flags =        CON_PRINTBUFFER | CON_BOOT,
        .index =        -1,
 };
 
@@ -133,8 +137,7 @@ static void __init process_switch(char c)
                prom_halt();
                break;
        case 'p':
-               /* Use PROM debug console. */
-               register_console(&prom_debug_console);
+               /* Just ignore, this behavior is now the default.  */
                break;
        default:
                printk("Unknown boot switch (-%c)\n", c);
@@ -215,6 +218,10 @@ void __init setup_arch(char **cmdline_p)
        strcpy(boot_command_line, *cmdline_p);
        parse_early_param();
 
+       boot_flags_init(*cmdline_p);
+
+       register_console(&prom_early_console);
+
        /* Set sparc_cpu_model */
        sparc_cpu_model = sun_unknown;
        if (!strcmp(&cputypval[0], "sun4 "))
@@ -265,7 +272,6 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
 #endif
-       boot_flags_init(*cmdline_p);
 
        idprom_init();
        if (ARCH_SUN4C)
@@ -311,75 +317,6 @@ void __init setup_arch(char **cmdline_p)
        smp_setup_cpu_possible_map();
 }
 
-static int ncpus_probed;
-
-static int show_cpuinfo(struct seq_file *m, void *__unused)
-{
-       seq_printf(m,
-                  "cpu\t\t: %s\n"
-                  "fpu\t\t: %s\n"
-                  "promlib\t\t: Version %d Revision %d\n"
-                  "prom\t\t: %d.%d\n"
-                  "type\t\t: %s\n"
-                  "ncpus probed\t: %d\n"
-                  "ncpus active\t: %d\n"
-#ifndef CONFIG_SMP
-                  "CPU0Bogo\t: %lu.%02lu\n"
-                  "CPU0ClkTck\t: %ld\n"
-#endif
-                  ,
-                  sparc_cpu_type,
-                  sparc_fpu_type ,
-                  romvec->pv_romvers,
-                  prom_rev,
-                  romvec->pv_printrev >> 16,
-                  romvec->pv_printrev & 0xffff,
-                  &cputypval[0],
-                  ncpus_probed,
-                  num_online_cpus()
-#ifndef CONFIG_SMP
-                  , cpu_data(0).udelay_val/(500000/HZ),
-                  (cpu_data(0).udelay_val/(5000/HZ)) % 100,
-                  cpu_data(0).clock_tick
-#endif
-               );
-
-#ifdef CONFIG_SMP
-       smp_bogo(m);
-#endif
-       mmu_info(m);
-#ifdef CONFIG_SMP
-       smp_info(m);
-#endif
-       return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-       /* The pointer we are returning is arbitrary,
-        * it just has to be non-NULL and not IS_ERR
-        * in the success case.
-        */
-       return *pos == 0 ? &c_start : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       ++*pos;
-       return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
-       .start =c_start,
-       .next = c_next,
-       .stop = c_stop,
-       .show = show_cpuinfo,
-};
-
 extern int stop_a_enabled;
 
 void sun_do_break(void)
index 29bafe051bb172ab8878dbc960a65730eea9e7f3..f3b6850cc8db4e4e59e5b975fcd6184af2c2d70c 100644 (file)
@@ -339,84 +339,6 @@ void __init setup_arch(char **cmdline_p)
        paging_init();
 }
 
-/* BUFFER is PAGE_SIZE bytes long. */
-
-extern void smp_info(struct seq_file *);
-extern void smp_bogo(struct seq_file *);
-extern void mmu_info(struct seq_file *);
-
-unsigned int dcache_parity_tl1_occurred;
-unsigned int icache_parity_tl1_occurred;
-
-int ncpus_probed;
-
-static int show_cpuinfo(struct seq_file *m, void *__unused)
-{
-       seq_printf(m, 
-                  "cpu\t\t: %s\n"
-                  "fpu\t\t: %s\n"
-                  "pmu\t\t: %s\n"
-                  "prom\t\t: %s\n"
-                  "type\t\t: %s\n"
-                  "ncpus probed\t: %d\n"
-                  "ncpus active\t: %d\n"
-                  "D$ parity tl1\t: %u\n"
-                  "I$ parity tl1\t: %u\n"
-#ifndef CONFIG_SMP
-                  "Cpu0ClkTck\t: %016lx\n"
-#endif
-                  ,
-                  sparc_cpu_type,
-                  sparc_fpu_type,
-                  sparc_pmu_type,
-                  prom_version,
-                  ((tlb_type == hypervisor) ?
-                   "sun4v" :
-                   "sun4u"),
-                  ncpus_probed,
-                  num_online_cpus(),
-                  dcache_parity_tl1_occurred,
-                  icache_parity_tl1_occurred
-#ifndef CONFIG_SMP
-                  , cpu_data(0).clock_tick
-#endif
-               );
-#ifdef CONFIG_SMP
-       smp_bogo(m);
-#endif
-       mmu_info(m);
-#ifdef CONFIG_SMP
-       smp_info(m);
-#endif
-       return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-       /* The pointer we are returning is arbitrary,
-        * it just has to be non-NULL and not IS_ERR
-        * in the success case.
-        */
-       return *pos == 0 ? &c_start : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       ++*pos;
-       return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
-       .start =c_start,
-       .next = c_next,
-       .stop = c_stop,
-       .show = show_cpuinfo,
-};
-
 extern int stop_a_enabled;
 
 void sun_do_break(void)
index 442286d83435d115403ac71af53b7a6ce81c88fa..d5b3958be0b40ac25fdcb7d209d10d950c7df80e 100644 (file)
@@ -37,8 +37,6 @@
 #include "irq.h"
 
 volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
-unsigned char boot_cpu_id = 0;
-unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
@@ -130,14 +128,57 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
 void smp_send_reschedule(int cpu)
 {
        /*
-        * XXX missing reschedule IPI, see scheduler_ipi()
+        * CPU model dependent way of implementing IPI generation targeting
+        * a single CPU. The trap handler needs only to do trap entry/return
+        * to call schedule.
         */
+       BTFIXUP_CALL(smp_ipi_resched)(cpu);
 }
 
 void smp_send_stop(void)
 {
 }
 
+void arch_send_call_function_single_ipi(int cpu)
+{
+       /* trigger one IPI single call on one CPU */
+       BTFIXUP_CALL(smp_ipi_single)(cpu);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       int cpu;
+
+       /* trigger IPI mask call on each CPU */
+       for_each_cpu(cpu, mask)
+               BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
+}
+
+void smp_resched_interrupt(void)
+{
+       irq_enter();
+       scheduler_ipi();
+       local_cpu_data().irq_resched_count++;
+       irq_exit();
+       /* re-schedule routine called by interrupt return code. */
+}
+
+void smp_call_function_single_interrupt(void)
+{
+       irq_enter();
+       generic_smp_call_function_single_interrupt();
+       local_cpu_data().irq_call_count++;
+       irq_exit();
+}
+
+void smp_call_function_interrupt(void)
+{
+       irq_enter();
+       generic_smp_call_function_interrupt();
+       local_cpu_data().irq_call_count++;
+       irq_exit();
+}
+
 void smp_flush_cache_all(void)
 {
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
@@ -153,9 +194,10 @@ void smp_flush_tlb_all(void)
 void smp_flush_cache_mm(struct mm_struct *mm)
 {
        if(mm->context != NO_CONTEXT) {
-               cpumask_t cpu_mask = *mm_cpumask(mm);
-               cpu_clear(smp_processor_id(), cpu_mask);
-               if (!cpus_empty(cpu_mask))
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
@@ -164,9 +206,10 @@ void smp_flush_cache_mm(struct mm_struct *mm)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
        if(mm->context != NO_CONTEXT) {
-               cpumask_t cpu_mask = *mm_cpumask(mm);
-               cpu_clear(smp_processor_id(), cpu_mask);
-               if (!cpus_empty(cpu_mask)) {
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
                        if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
                                cpumask_copy(mm_cpumask(mm),
@@ -182,9 +225,10 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
        struct mm_struct *mm = vma->vm_mm;
 
        if (mm->context != NO_CONTEXT) {
-               cpumask_t cpu_mask = *mm_cpumask(mm);
-               cpu_clear(smp_processor_id(), cpu_mask);
-               if (!cpus_empty(cpu_mask))
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
                local_flush_cache_range(vma, start, end);
        }
@@ -196,9 +240,10 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        struct mm_struct *mm = vma->vm_mm;
 
        if (mm->context != NO_CONTEXT) {
-               cpumask_t cpu_mask = *mm_cpumask(mm);
-               cpu_clear(smp_processor_id(), cpu_mask);
-               if (!cpus_empty(cpu_mask))
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
@@ -209,9 +254,10 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
        struct mm_struct *mm = vma->vm_mm;
 
        if(mm->context != NO_CONTEXT) {
-               cpumask_t cpu_mask = *mm_cpumask(mm);
-               cpu_clear(smp_processor_id(), cpu_mask);
-               if (!cpus_empty(cpu_mask))
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
@@ -222,19 +268,15 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
        struct mm_struct *mm = vma->vm_mm;
 
        if(mm->context != NO_CONTEXT) {
-               cpumask_t cpu_mask = *mm_cpumask(mm);
-               cpu_clear(smp_processor_id(), cpu_mask);
-               if (!cpus_empty(cpu_mask))
+               cpumask_t cpu_mask;
+               cpumask_copy(&cpu_mask, mm_cpumask(mm));
+               cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+               if (!cpumask_empty(&cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
 }
 
-void smp_reschedule_irq(void)
-{
-       set_need_resched();
-}
-
 void smp_flush_page_to_ram(unsigned long page)
 {
        /* Current theory is that those who call this are the one's
@@ -251,9 +293,10 @@ void smp_flush_page_to_ram(unsigned long page)
 
 void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-       cpumask_t cpu_mask = *mm_cpumask(mm);
-       cpu_clear(smp_processor_id(), cpu_mask);
-       if (!cpus_empty(cpu_mask))
+       cpumask_t cpu_mask;
+       cpumask_copy(&cpu_mask, mm_cpumask(mm));
+       cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+       if (!cpumask_empty(&cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
 }
@@ -407,7 +450,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
        };
 
        if (!ret) {
-               cpu_set(cpu, smp_commenced_mask);
+               cpumask_set_cpu(cpu, &smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
        }
index 9478da7fdb3e7017921259dfb664452be3f2e6ce..99cb17251bb55be51dc29df0ddc6ec36694d2074 100644 (file)
@@ -121,11 +121,11 @@ void __cpuinit smp_callin(void)
        /* inform the notifiers about the new cpu */
        notify_cpu_starting(cpuid);
 
-       while (!cpu_isset(cpuid, smp_commenced_mask))
+       while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                rmb();
 
        ipi_call_lock_irq();
-       cpu_set(cpuid, cpu_online_map);
+       set_cpu_online(cpuid, true);
        ipi_call_unlock_irq();
 
        /* idle thread is expected to have preempt disabled */
@@ -785,7 +785,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
 
 /* Send cross call to all processors mentioned in MASK_P
  * except self.  Really, there are only two cases currently,
- * "&cpu_online_map" and "&mm->cpu_vm_mask".
+ * "cpu_online_mask" and "mm_cpumask(mm)".
  */
 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 {
@@ -797,7 +797,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 /* Send cross call to all processors except self. */
 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 {
-       smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
+       smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 }
 
 extern unsigned long xcall_sync_tick;
@@ -805,7 +805,7 @@ extern unsigned long xcall_sync_tick;
 static void smp_start_sync_tick_client(int cpu)
 {
        xcall_deliver((u64) &xcall_sync_tick, 0, 0,
-                     &cpumask_of_cpu(cpu));
+                     cpumask_of(cpu));
 }
 
 extern unsigned long xcall_call_function;
@@ -820,7 +820,7 @@ extern unsigned long xcall_call_function_single;
 void arch_send_call_function_single_ipi(int cpu)
 {
        xcall_deliver((u64) &xcall_call_function_single, 0, 0,
-                     &cpumask_of_cpu(cpu));
+                     cpumask_of(cpu));
 }
 
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
@@ -918,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
                }
                if (data0) {
                        xcall_deliver(data0, __pa(pg_addr),
-                                     (u64) pg_addr, &cpumask_of_cpu(cpu));
+                                     (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
                        atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -954,7 +954,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        }
        if (data0) {
                xcall_deliver(data0, __pa(pg_addr),
-                             (u64) pg_addr, &cpu_online_map);
+                             (u64) pg_addr, cpu_online_mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
                atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -1197,32 +1197,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
        for_each_present_cpu(i) {
                unsigned int j;
 
-               cpus_clear(cpu_core_map[i]);
+               cpumask_clear(&cpu_core_map[i]);
                if (cpu_data(i).core_id == 0) {
-                       cpu_set(i, cpu_core_map[i]);
+                       cpumask_set_cpu(i, &cpu_core_map[i]);
                        continue;
                }
 
                for_each_present_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
-                               cpu_set(j, cpu_core_map[i]);
+                               cpumask_set_cpu(j, &cpu_core_map[i]);
                }
        }
 
        for_each_present_cpu(i) {
                unsigned int j;
 
-               cpus_clear(per_cpu(cpu_sibling_map, i));
+               cpumask_clear(&per_cpu(cpu_sibling_map, i));
                if (cpu_data(i).proc_id == -1) {
-                       cpu_set(i, per_cpu(cpu_sibling_map, i));
+                       cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
                        continue;
                }
 
                for_each_present_cpu(j) {
                        if (cpu_data(i).proc_id ==
                            cpu_data(j).proc_id)
-                               cpu_set(j, per_cpu(cpu_sibling_map, i));
+                               cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
                }
        }
 }
@@ -1232,10 +1232,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
        int ret = smp_boot_one_cpu(cpu);
 
        if (!ret) {
-               cpu_set(cpu, smp_commenced_mask);
-               while (!cpu_isset(cpu, cpu_online_map))
+               cpumask_set_cpu(cpu, &smp_commenced_mask);
+               while (!cpu_online(cpu))
                        mb();
-               if (!cpu_isset(cpu, cpu_online_map)) {
+               if (!cpu_online(cpu)) {
                        ret = -ENODEV;
                } else {
                        /* On SUN4V, writes to %tick and %stick are
@@ -1269,7 +1269,7 @@ void cpu_play_dead(void)
                                tb->nonresum_mondo_pa, 0);
        }
 
-       cpu_clear(cpu, smp_commenced_mask);
+       cpumask_clear_cpu(cpu, &smp_commenced_mask);
        membar_safe("#Sync");
 
        local_irq_disable();
@@ -1290,13 +1290,13 @@ int __cpu_disable(void)
        cpuinfo_sparc *c;
        int i;
 
-       for_each_cpu_mask(i, cpu_core_map[cpu])
-               cpu_clear(cpu, cpu_core_map[i]);
-       cpus_clear(cpu_core_map[cpu]);
+       for_each_cpu(i, &cpu_core_map[cpu])
+               cpumask_clear_cpu(cpu, &cpu_core_map[i]);
+       cpumask_clear(&cpu_core_map[cpu]);
 
-       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
+       for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
+       cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
 
        c = &cpu_data(cpu);
 
@@ -1313,7 +1313,7 @@ int __cpu_disable(void)
        local_irq_disable();
 
        ipi_call_lock();
-       cpu_clear(cpu, cpu_online_map);
+       set_cpu_online(cpu, false);
        ipi_call_unlock();
 
        cpu_map_rebuild();
@@ -1327,11 +1327,11 @@ void __cpu_die(unsigned int cpu)
 
        for (i = 0; i < 100; i++) {
                smp_rmb();
-               if (!cpu_isset(cpu, smp_commenced_mask))
+               if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
                        break;
                msleep(100);
        }
-       if (cpu_isset(cpu, smp_commenced_mask)) {
+       if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
                printk(KERN_ERR "CPU %u didn't die...\n", cpu);
        } else {
 #if defined(CONFIG_SUN_LDOMS)
@@ -1341,7 +1341,7 @@ void __cpu_die(unsigned int cpu)
                do {
                        hv_err = sun4v_cpu_stop(cpu);
                        if (hv_err == HV_EOK) {
-                               cpu_clear(cpu, cpu_present_map);
+                               set_cpu_present(cpu, false);
                                break;
                        }
                } while (--limit > 0);
@@ -1362,7 +1362,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void smp_send_reschedule(int cpu)
 {
        xcall_deliver((u64) &xcall_receive_signal, 0, 0,
-                     &cpumask_of_cpu(cpu));
+                     cpumask_of(cpu));
 }
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
index 90eea38ad66f35a02b091d8a15ef8d888c5637ca..f6bf25a2ff804a89beec32ed9c0bdae2a01e3b01 100644 (file)
  */
 unsigned char __iomem *interrupt_enable;
 
-static void sun4c_disable_irq(unsigned int irq_nr)
+static void sun4c_mask_irq(struct irq_data *data)
 {
-       unsigned long flags;
-       unsigned char current_mask, new_mask;
-
-       local_irq_save(flags);
-       irq_nr &= (NR_IRQS - 1);
-       current_mask = sbus_readb(interrupt_enable);
-       switch (irq_nr) {
-       case 1:
-               new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
-               break;
-       case 8:
-               new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
-               break;
-       case 10:
-               new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
-               break;
-       case 14:
-               new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
-               break;
-       default:
+       unsigned long mask = (unsigned long)data->chip_data;
+
+       if (mask) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               mask = sbus_readb(interrupt_enable) & ~mask;
+               sbus_writeb(mask, interrupt_enable);
                local_irq_restore(flags);
-               return;
        }
-       sbus_writeb(new_mask, interrupt_enable);
-       local_irq_restore(flags);
 }
 
-static void sun4c_enable_irq(unsigned int irq_nr)
+static void sun4c_unmask_irq(struct irq_data *data)
 {
-       unsigned long flags;
-       unsigned char current_mask, new_mask;
-
-       local_irq_save(flags);
-       irq_nr &= (NR_IRQS - 1);
-       current_mask = sbus_readb(interrupt_enable);
-       switch (irq_nr) {
-       case 1:
-               new_mask = ((current_mask) | SUN4C_INT_E1);
-               break;
-       case 8:
-               new_mask = ((current_mask) | SUN4C_INT_E8);
-               break;
-       case 10:
-               new_mask = ((current_mask) | SUN4C_INT_E10);
-               break;
-       case 14:
-               new_mask = ((current_mask) | SUN4C_INT_E14);
-               break;
-       default:
+       unsigned long mask = (unsigned long)data->chip_data;
+
+       if (mask) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               mask = sbus_readb(interrupt_enable) | mask;
+               sbus_writeb(mask, interrupt_enable);
                local_irq_restore(flags);
-               return;
        }
-       sbus_writeb(new_mask, interrupt_enable);
-       local_irq_restore(flags);
+}
+
+static unsigned int sun4c_startup_irq(struct irq_data *data)
+{
+       irq_link(data->irq);
+       sun4c_unmask_irq(data);
+
+       return 0;
+}
+
+static void sun4c_shutdown_irq(struct irq_data *data)
+{
+       sun4c_mask_irq(data);
+       irq_unlink(data->irq);
+}
+
+static struct irq_chip sun4c_irq = {
+       .name           = "sun4c",
+       .irq_startup    = sun4c_startup_irq,
+       .irq_shutdown   = sun4c_shutdown_irq,
+       .irq_mask       = sun4c_mask_irq,
+       .irq_unmask     = sun4c_unmask_irq,
+};
+
+static unsigned int sun4c_build_device_irq(struct platform_device *op,
+                                          unsigned int real_irq)
+{
+        unsigned int irq;
+
+       if (real_irq >= 16) {
+               prom_printf("Bogus sun4c IRQ %u\n", real_irq);
+               prom_halt();
+       }
+
+       irq = irq_alloc(real_irq, real_irq);
+       if (irq) {
+               unsigned long mask = 0UL;
+
+               switch (real_irq) {
+               case 1:
+                       mask = SUN4C_INT_E1;
+                       break;
+               case 8:
+                       mask = SUN4C_INT_E8;
+                       break;
+               case 10:
+                       mask = SUN4C_INT_E10;
+                       break;
+               case 14:
+                       mask = SUN4C_INT_E14;
+                       break;
+               default:
+                       /* All the rest are either always enabled,
+                        * or are for signalling software interrupts.
+                        */
+                       break;
+               }
+               irq_set_chip_and_handler_name(irq, &sun4c_irq,
+                                             handle_level_irq, "level");
+               irq_set_chip_data(irq, (void *)mask);
+       }
+       return irq;
 }
 
 struct sun4c_timer_info {
@@ -144,8 +176,9 @@ static void sun4c_load_profile_irq(int cpu, unsigned int limit)
 
 static void __init sun4c_init_timers(irq_handler_t counter_fn)
 {
-       const struct linux_prom_irqs *irq;
+       const struct linux_prom_irqs *prom_irqs;
        struct device_node *dp;
+       unsigned int irq;
        const u32 *addr;
        int err;
 
@@ -163,9 +196,9 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
 
        sun4c_timers = (void __iomem *) (unsigned long) addr[0];
 
-       irq = of_get_property(dp, "intr", NULL);
+       prom_irqs = of_get_property(dp, "intr", NULL);
        of_node_put(dp);
-       if (!irq) {
+       if (!prom_irqs) {
                prom_printf("sun4c_init_timers: No intr property\n");
                prom_halt();
        }
@@ -178,15 +211,15 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
 
        master_l10_counter = &sun4c_timers->l10_count;
 
-       err = request_irq(irq[0].pri, counter_fn,
-                         (IRQF_DISABLED | SA_STATIC_ALLOC),
-                         "timer", NULL);
+       irq = sun4c_build_device_irq(NULL, prom_irqs[0].pri);
+       err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
        if (err) {
                prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err);
                prom_halt();
        }
 
-       sun4c_disable_irq(irq[1].pri);
+       /* disable timer interrupt */
+       sun4c_mask_irq(irq_get_irq_data(irq));
 }
 
 #ifdef CONFIG_SMP
@@ -215,14 +248,11 @@ void __init sun4c_init_IRQ(void)
 
        interrupt_enable = (void __iomem *) (unsigned long) addr[0];
 
-       BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(enable_pil_irq, sun4c_enable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
 
-       sparc_irq_config.init_timers = sun4c_init_timers;
+       sparc_irq_config.init_timers      = sun4c_init_timers;
+       sparc_irq_config.build_device_irq = sun4c_build_device_irq;
 
 #ifdef CONFIG_SMP
        BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
index 77b4a89927100fe51be857b6a47d4fead6a8e135..a9ea60eb2c10f21e01cf007143e47942b6a768ad 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/io.h>
 #include <asm/sbi.h>
 #include <asm/cacheflush.h>
+#include <asm/setup.h>
 
 #include "kernel.h"
 #include "irq.h"
  * cpu local.  CPU local interrupts cover the timer interrupts
  * and whatnot, and we encode those as normal PILs between
  * 0 and 15.
- *
- * SBUS interrupts are encoded integers including the board number
- * (plus one), the SBUS level, and the SBUS slot number.  Sun4D
- * IRQ dispatch is done by:
- *
- * 1) Reading the BW local interrupt table in order to get the bus
- *    interrupt mask.
- *
- *    This table is indexed by SBUS interrupt level which can be
- *    derived from the PIL we got interrupted on.
- *
- * 2) For each bus showing interrupt pending from #1, read the
- *    SBI interrupt state register.  This will indicate which slots
- *    have interrupts pending for that SBUS interrupt level.
+ * SBUS interrupts are encoded as a combination of board, level and slot.
  */
 
+struct sun4d_handler_data {
+       unsigned int cpuid;    /* target cpu */
+       unsigned int real_irq; /* interrupt level */
+};
+
+
+static unsigned int sun4d_encode_irq(int board, int lvl, int slot)
+{
+       return (board + 1) << 5 | (lvl << 2) | slot;
+}
+
 struct sun4d_timer_regs {
        u32     l10_timer_limit;
        u32     l10_cur_countx;
@@ -48,17 +47,12 @@ struct sun4d_timer_regs {
 
 static struct sun4d_timer_regs __iomem *sun4d_timers;
 
-#define TIMER_IRQ      10
-
-#define MAX_STATIC_ALLOC       4
-static unsigned char sbus_tid[32];
-
-static struct irqaction *irq_action[NR_IRQS];
+#define SUN4D_TIMER_IRQ        10
 
-static struct sbus_action {
-       struct irqaction *action;
      /* For SMP this needs to be extended */
-} *sbus_actions;
+/* Specify which cpu handle interrupts from which board.
+ * Index is board - value is cpu.
+ */
+static unsigned char board_to_cpu[32];
 
 static int pil_to_sbus[] = {
        0,
@@ -79,152 +73,81 @@ static int pil_to_sbus[] = {
        0,
 };
 
-static int sbus_to_pil[] = {
-       0,
-       2,
-       3,
-       5,
-       7,
-       9,
-       11,
-       13,
-};
-
-static int nsbi;
-
 /* Exported for sun4d_smp.c */
 DEFINE_SPINLOCK(sun4d_imsk_lock);
 
-int show_sun4d_interrupts(struct seq_file *p, void *v)
+/* SBUS interrupts are encoded integers including the board number
+ * (plus one), the SBUS level, and the SBUS slot number.  Sun4D
+ * IRQ dispatch is done by:
+ *
+ * 1) Reading the BW local interrupt table in order to get the bus
+ *    interrupt mask.
+ *
+ *    This table is indexed by SBUS interrupt level which can be
+ *    derived from the PIL we got interrupted on.
+ *
+ * 2) For each bus showing interrupt pending from #1, read the
+ *    SBI interrupt state register.  This will indicate which slots
+ *    have interrupts pending for that SBUS interrupt level.
+ *
+ * 3) Call the generic IRQ support.
+ */
+static void sun4d_sbus_handler_irq(int sbusl)
 {
-       int i = *(loff_t *) v, j = 0, k = 0, sbusl;
-       struct irqaction *action;
-       unsigned long flags;
-#ifdef CONFIG_SMP
-       int x;
-#endif
-
-       spin_lock_irqsave(&irq_action_lock, flags);
-       if (i < NR_IRQS) {
-               sbusl = pil_to_sbus[i];
-               if (!sbusl) {
-                       action = *(i + irq_action);
-                       if (!action)
-                               goto out_unlock;
-               } else {
-                       for (j = 0; j < nsbi; j++) {
-                               for (k = 0; k < 4; k++)
-                                       action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
-                                       if (action)
-                                               goto found_it;
-                       }
-                       goto out_unlock;
-               }
-found_it:      seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
-               seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-               for_each_online_cpu(x)
-                       seq_printf(p, "%10u ",
-                              kstat_cpu(cpu_logical_map(x)).irqs[i]);
-#endif
-               seq_printf(p, "%c %s",
-                       (action->flags & IRQF_DISABLED) ? '+' : ' ',
-                       action->name);
-               action = action->next;
-               for (;;) {
-                       for (; action; action = action->next) {
-                               seq_printf(p, ",%s %s",
-                                       (action->flags & IRQF_DISABLED) ? " +" : "",
-                                       action->name);
-                       }
-                       if (!sbusl)
-                               break;
-                       k++;
-                       if (k < 4) {
-                               action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
-                       } else {
-                               j++;
-                               if (j == nsbi)
-                                       break;
-                               k = 0;
-                               action = sbus_actions[(j << 5) + (sbusl << 2)].action;
+       unsigned int bus_mask;
+       unsigned int sbino, slot;
+       unsigned int sbil;
+
+       bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
+       bw_clear_intr_mask(sbusl, bus_mask);
+
+       sbil = (sbusl << 2);
+       /* Loop for each pending SBI */
+       for (sbino = 0; bus_mask; sbino++) {
+               unsigned int idx, mask;
+
+               bus_mask >>= 1;
+               if (!(bus_mask & 1))
+                       continue;
+               /* XXX This seems to ACK the irq twice.  acquire_sbi()
+                * XXX uses swap, therefore this writes 0xf << sbil,
+                * XXX then later release_sbi() will write the individual
+                * XXX bits which were set again.
+                */
+               mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
+               mask &= (0xf << sbil);
+
+               /* Loop for each pending SBI slot */
+               idx = 0;
+               slot = (1 << sbil);
+               while (mask != 0) {
+                       unsigned int pil;
+                       struct irq_bucket *p;
+
+                       idx++;
+                       slot <<= 1;
+                       if (!(mask & slot))
+                               continue;
+
+                       mask &= ~slot;
+                       pil = sun4d_encode_irq(sbino, sbil, idx);
+
+                       p = irq_map[pil];
+                       while (p) {
+                               struct irq_bucket *next;
+
+                               next = p->next;
+                               generic_handle_irq(p->irq);
+                               p = next;
                        }
+                       release_sbi(SBI2DEVID(sbino), slot);
                }
-               seq_putc(p, '\n');
        }
-out_unlock:
-       spin_unlock_irqrestore(&irq_action_lock, flags);
-       return 0;
-}
-
-void sun4d_free_irq(unsigned int irq, void *dev_id)
-{
-       struct irqaction *action, **actionp;
-       struct irqaction *tmp = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&irq_action_lock, flags);
-       if (irq < 15)
-               actionp = irq + irq_action;
-       else
-               actionp = &(sbus_actions[irq - (1 << 5)].action);
-       action = *actionp;
-       if (!action) {
-               printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
-               goto out_unlock;
-       }
-       if (dev_id) {
-               for (; action; action = action->next) {
-                       if (action->dev_id == dev_id)
-                               break;
-                       tmp = action;
-               }
-               if (!action) {
-                       printk(KERN_ERR "Trying to free free shared IRQ%d\n",
-                              irq);
-                       goto out_unlock;
-               }
-       } else if (action->flags & IRQF_SHARED) {
-               printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
-                      irq);
-               goto out_unlock;
-       }
-       if (action->flags & SA_STATIC_ALLOC) {
-               /*
-                * This interrupt is marked as specially allocated
-                * so it is a bad idea to free it.
-                */
-               printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
-                      irq, action->name);
-               goto out_unlock;
-       }
-
-       if (tmp)
-               tmp->next = action->next;
-       else
-               *actionp = action->next;
-
-       spin_unlock_irqrestore(&irq_action_lock, flags);
-
-       synchronize_irq(irq);
-
-       spin_lock_irqsave(&irq_action_lock, flags);
-
-       kfree(action);
-
-       if (!(*actionp))
-               __disable_irq(irq);
-
-out_unlock:
-       spin_unlock_irqrestore(&irq_action_lock, flags);
 }
 
 void sun4d_handler_irq(int pil, struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
-       struct irqaction *action;
-       int cpu = smp_processor_id();
        /* SBUS IRQ level (1 - 7) */
        int sbusl = pil_to_sbus[pil];
 
@@ -233,160 +156,96 @@ void sun4d_handler_irq(int pil, struct pt_regs *regs)
 
        cc_set_iclr(1 << pil);
 
+#ifdef CONFIG_SMP
+       /*
+        * Check IPI data structures after IRQ has been cleared. Hard and Soft
+        * IRQ can happen at the same time, so both cases are always handled.
+        */
+       if (pil == SUN4D_IPI_IRQ)
+               sun4d_ipi_interrupt();
+#endif
+
        old_regs = set_irq_regs(regs);
        irq_enter();
-       kstat_cpu(cpu).irqs[pil]++;
-       if (!sbusl) {
-               action = *(pil + irq_action);
-               if (!action)
-                       unexpected_irq(pil, NULL, regs);
-               do {
-                       action->handler(pil, action->dev_id);
-                       action = action->next;
-               } while (action);
+       if (sbusl == 0) {
+               /* cpu interrupt */
+               struct irq_bucket *p;
+
+               p = irq_map[pil];
+               while (p) {
+                       struct irq_bucket *next;
+
+                       next = p->next;
+                       generic_handle_irq(p->irq);
+                       p = next;
+               }
        } else {
-               int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
-               int sbino;
-               struct sbus_action *actionp;
-               unsigned mask, slot;
-               int sbil = (sbusl << 2);
-
-               bw_clear_intr_mask(sbusl, bus_mask);
-
-               /* Loop for each pending SBI */
-               for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
-                       if (bus_mask & 1) {
-                               mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
-                               mask &= (0xf << sbil);
-                               actionp = sbus_actions + (sbino << 5) + (sbil);
-                               /* Loop for each pending SBI slot */
-                               for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
-                                       if (mask & slot) {
-                                               mask &= ~slot;
-                                               action = actionp->action;
-
-                                               if (!action)
-                                                       unexpected_irq(pil, NULL, regs);
-                                               do {
-                                                       action->handler(pil, action->dev_id);
-                                                       action = action->next;
-                                               } while (action);
-                                               release_sbi(SBI2DEVID(sbino), slot);
-                                       }
-                       }
+               /* SBUS interrupt */
+               sun4d_sbus_handler_irq(sbusl);
        }
        irq_exit();
        set_irq_regs(old_regs);
 }
 
-int sun4d_request_irq(unsigned int irq,
-               irq_handler_t handler,
-               unsigned long irqflags, const char *devname, void *dev_id)
+
+static void sun4d_mask_irq(struct irq_data *data)
 {
-       struct irqaction *action, *tmp = NULL, **actionp;
+       struct sun4d_handler_data *handler_data = data->handler_data;
+       unsigned int real_irq;
+#ifdef CONFIG_SMP
+       int cpuid = handler_data->cpuid;
        unsigned long flags;
-       int ret;
-
-       if (irq > 14 && irq < (1 << 5)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (!handler) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       spin_lock_irqsave(&irq_action_lock, flags);
-
-       if (irq >= (1 << 5))
-               actionp = &(sbus_actions[irq - (1 << 5)].action);
-       else
-               actionp = irq + irq_action;
-       action = *actionp;
-
-       if (action) {
-               if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
-                       for (tmp = action; tmp->next; tmp = tmp->next)
-                               /* find last entry - tmp used below */;
-               } else {
-                       ret = -EBUSY;
-                       goto out_unlock;
-               }
-               if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
-                       printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
-                              irq);
-                       ret = -EBUSY;
-                       goto out_unlock;
-               }
-               action = NULL;          /* Or else! */
-       }
-
-       /* If this is flagged as statically allocated then we use our
-        * private struct which is never freed.
-        */
-       if (irqflags & SA_STATIC_ALLOC) {
-               if (static_irq_count < MAX_STATIC_ALLOC)
-                       action = &static_irqaction[static_irq_count++];
-               else
-                       printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
-                              irq, devname);
-       }
-
-       if (action == NULL)
-               action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
-
-       if (!action) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       action->handler = handler;
-       action->flags = irqflags;
-       action->name = devname;
-       action->next = NULL;
-       action->dev_id = dev_id;
-
-       if (tmp)
-               tmp->next = action;
-       else
-               *actionp = action;
-
-       __enable_irq(irq);
-
-       ret = 0;
-out_unlock:
-       spin_unlock_irqrestore(&irq_action_lock, flags);
-out:
-       return ret;
+#endif
+       real_irq = handler_data->real_irq;
+#ifdef CONFIG_SMP
+       spin_lock_irqsave(&sun4d_imsk_lock, flags);
+       cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq));
+       spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+#else
+       cc_set_imsk(cc_get_imsk() | (1 << real_irq));
+#endif
 }
 
-static void sun4d_disable_irq(unsigned int irq)
+static void sun4d_unmask_irq(struct irq_data *data)
 {
-       int tid = sbus_tid[(irq >> 5) - 1];
+       struct sun4d_handler_data *handler_data = data->handler_data;
+       unsigned int real_irq;
+#ifdef CONFIG_SMP
+       int cpuid = handler_data->cpuid;
        unsigned long flags;
+#endif
+       real_irq = handler_data->real_irq;
 
-       if (irq < NR_IRQS)
-               return;
-
+#ifdef CONFIG_SMP
        spin_lock_irqsave(&sun4d_imsk_lock, flags);
-       cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
+       cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | ~(1 << real_irq));
        spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+#else
+       cc_set_imsk(cc_get_imsk() | ~(1 << real_irq));
+#endif
 }
 
-static void sun4d_enable_irq(unsigned int irq)
+static unsigned int sun4d_startup_irq(struct irq_data *data)
 {
-       int tid = sbus_tid[(irq >> 5) - 1];
-       unsigned long flags;
-
-       if (irq < NR_IRQS)
-               return;
+       irq_link(data->irq);
+       sun4d_unmask_irq(data);
+       return 0;
+}
 
-       spin_lock_irqsave(&sun4d_imsk_lock, flags);
-       cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
-       spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
+static void sun4d_shutdown_irq(struct irq_data *data)
+{
+       sun4d_mask_irq(data);
+       irq_unlink(data->irq);
 }
 
+struct irq_chip sun4d_irq = {
+       .name           = "sun4d",
+       .irq_startup    = sun4d_startup_irq,
+       .irq_shutdown   = sun4d_shutdown_irq,
+       .irq_unmask     = sun4d_unmask_irq,
+       .irq_mask       = sun4d_mask_irq,
+};
+
 #ifdef CONFIG_SMP
 static void sun4d_set_cpu_int(int cpu, int level)
 {
@@ -413,7 +272,7 @@ void __init sun4d_distribute_irqs(void)
        for_each_node_by_name(dp, "sbi") {
                int devid = of_getintprop_default(dp, "device-id", 0);
                int board = of_getintprop_default(dp, "board#", 0);
-               sbus_tid[board] = cpuid;
+               board_to_cpu[board] = cpuid;
                set_sbi_tid(devid, cpuid << 3);
        }
        printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid);
@@ -443,15 +302,16 @@ static void __init sun4d_load_profile_irqs(void)
 unsigned int sun4d_build_device_irq(struct platform_device *op,
                                     unsigned int real_irq)
 {
-       static int pil_to_sbus[] = {
-               0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
-       };
        struct device_node *dp = op->dev.of_node;
        struct device_node *io_unit, *sbi = dp->parent;
        const struct linux_prom_registers *regs;
+       struct sun4d_handler_data *handler_data;
+       unsigned int pil;
+       unsigned int irq;
        int board, slot;
        int sbusl;
 
+       irq = 0;
        while (sbi) {
                if (!strcmp(sbi->name, "sbi"))
                        break;
@@ -484,7 +344,28 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
 
        sbusl = pil_to_sbus[real_irq];
        if (sbusl)
-               return (((board + 1) << 5) + (sbusl << 2) + slot);
+               pil = sun4d_encode_irq(board, sbusl, slot);
+       else
+               pil = real_irq;
+
+       irq = irq_alloc(real_irq, pil);
+       if (irq == 0)
+               goto err_out;
+
+       handler_data = irq_get_handler_data(irq);
+       if (unlikely(handler_data))
+               goto err_out;
+
+       handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
+       if (unlikely(!handler_data)) {
+               prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
+               prom_halt();
+       }
+       handler_data->cpuid    = board_to_cpu[board];
+       handler_data->real_irq = real_irq;
+       irq_set_chip_and_handler_name(irq, &sun4d_irq,
+                                     handle_level_irq, "level");
+       irq_set_handler_data(irq, handler_data);
 
 err_out:
        return real_irq;
@@ -518,6 +399,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
 {
        struct device_node *dp;
        struct resource res;
+       unsigned int irq;
        const u32 *reg;
        int err;
 
@@ -552,9 +434,8 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
 
        master_l10_counter = &sun4d_timers->l10_cur_count;
 
-       err = request_irq(TIMER_IRQ, counter_fn,
-                         (IRQF_DISABLED | SA_STATIC_ALLOC),
-                         "timer", NULL);
+       irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ);
+       err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
        if (err) {
                prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
                             err);
@@ -567,27 +448,16 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
 void __init sun4d_init_sbi_irq(void)
 {
        struct device_node *dp;
-       int target_cpu = 0;
+       int target_cpu;
 
-#ifdef CONFIG_SMP
        target_cpu = boot_cpu_id;
-#endif
-
-       nsbi = 0;
-       for_each_node_by_name(dp, "sbi")
-               nsbi++;
-       sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
-       if (!sbus_actions) {
-               prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
-               prom_halt();
-       }
        for_each_node_by_name(dp, "sbi") {
                int devid = of_getintprop_default(dp, "device-id", 0);
                int board = of_getintprop_default(dp, "board#", 0);
                unsigned int mask;
 
                set_sbi_tid(devid, target_cpu << 3);
-               sbus_tid[board] = target_cpu;
+               board_to_cpu[board] = target_cpu;
 
                /* Get rid of pending irqs from PROM */
                mask = acquire_sbi(devid, 0xffffffff);
@@ -603,12 +473,10 @@ void __init sun4d_init_IRQ(void)
 {
        local_irq_disable();
 
-       BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
 
-       sparc_irq_config.init_timers = sun4d_init_timers;
+       sparc_irq_config.init_timers      = sun4d_init_timers;
        sparc_irq_config.build_device_irq = sun4d_build_device_irq;
 
 #ifdef CONFIG_SMP
index 475d50b96cd0bb7f49d0e05a9ce566a10edab3a5..133387980b5635f3dfddc2d33609533beec6f1b7 100644 (file)
@@ -32,6 +32,7 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon
        return val;
 }
 
+static void smp4d_ipi_init(void);
 static void smp_setup_percpu_timer(void);
 
 static unsigned char cpu_leds[32];
@@ -80,8 +81,6 @@ void __cpuinit smp4d_callin(void)
        local_flush_cache_all();
        local_flush_tlb_all();
 
-       cpu_probe();
-
        while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
                barrier();
 
@@ -105,7 +104,7 @@ void __cpuinit smp4d_callin(void)
 
        local_irq_enable();     /* We don't allow PIL 14 yet */
 
-       while (!cpu_isset(cpuid, smp_commenced_mask))
+       while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                barrier();
 
        spin_lock_irqsave(&sun4d_imsk_lock, flags);
@@ -120,6 +119,7 @@ void __cpuinit smp4d_callin(void)
  */
 void __init smp4d_boot_cpus(void)
 {
+       smp4d_ipi_init();
        if (boot_cpu_id)
                current_set[0] = NULL;
        smp_setup_percpu_timer();
@@ -191,6 +191,80 @@ void __init smp4d_smp_done(void)
        sun4d_distribute_irqs();
 }
 
+/* Memory structure giving interrupt handler information about IPI generated */
+struct sun4d_ipi_work {
+       int single;
+       int msk;
+       int resched;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work);
+
+/* Initialize IPIs on the SUN4D SMP machine */
+static void __init smp4d_ipi_init(void)
+{
+       int cpu;
+       struct sun4d_ipi_work *work;
+
+       printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ);
+
+       for_each_possible_cpu(cpu) {
+               work = &per_cpu(sun4d_ipi_work, cpu);
+               work->single = work->msk = work->resched = 0;
+       }
+}
+
+void sun4d_ipi_interrupt(void)
+{
+       struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);
+
+       if (work->single) {
+               work->single = 0;
+               smp_call_function_single_interrupt();
+       }
+       if (work->msk) {
+               work->msk = 0;
+               smp_call_function_interrupt();
+       }
+       if (work->resched) {
+               work->resched = 0;
+               smp_resched_interrupt();
+       }
+}
+
+static void smp4d_ipi_single(int cpu)
+{
+       struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
+
+       /* Mark work */
+       work->single = 1;
+
+       /* Generate IRQ on the CPU */
+       sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
+}
+
+static void smp4d_ipi_mask_one(int cpu)
+{
+       struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
+
+       /* Mark work */
+       work->msk = 1;
+
+       /* Generate IRQ on the CPU */
+       sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
+}
+
+static void smp4d_ipi_resched(int cpu)
+{
+       struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
+
+       /* Mark work */
+       work->resched = 1;
+
+       /* Generate IRQ on the CPU (any IRQ will cause resched) */
+       sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
+}
+
 static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
@@ -239,10 +313,10 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
                {
                        register int i;
 
-                       cpu_clear(smp_processor_id(), mask);
-                       cpus_and(mask, cpu_online_map, mask);
+                       cpumask_clear_cpu(smp_processor_id(), &mask);
+                       cpumask_and(&mask, cpu_online_mask, &mask);
                        for (i = 0; i <= high; i++) {
-                               if (cpu_isset(i, mask)) {
+                               if (cpumask_test_cpu(i, &mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        sun4d_send_ipi(i, IRQ_CROSS_CALL);
@@ -255,7 +329,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
                        i = 0;
                        do {
-                               if (!cpu_isset(i, mask))
+                               if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_in[i])
                                        barrier();
@@ -263,7 +337,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
                        i = 0;
                        do {
-                               if (!cpu_isset(i, mask))
+                               if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_out[i])
                                        barrier();
@@ -356,6 +430,9 @@ void __init sun4d_init_smp(void)
        BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_resched, smp4d_ipi_resched, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_single, smp4d_ipi_single, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_mask_one, smp4d_ipi_mask_one, BTFIXUPCALL_NORM);
 
        for (i = 0; i < NR_CPUS; i++) {
                ccall_info.processors_in[i] = 1;
index 69df6257a32ec87d31fdbce4ce99cb5c94c37f46..422c16dad1f66610f4d90b8723b652c658cfcae3 100644 (file)
 struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
 struct sun4m_irq_global __iomem *sun4m_irq_global;
 
+struct sun4m_handler_data {
+       bool    percpu;
+       long    mask;
+};
+
 /* Dave Redman (djhr@tadpole.co.uk)
  * The sun4m interrupt registers.
  */
@@ -142,9 +147,9 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
 #define        OBP_INT_LEVEL_VME       0x40
 
 #define SUN4M_TIMER_IRQ         (OBP_INT_LEVEL_ONBOARD | 10)
-#define SUM4M_PROFILE_IRQ       (OBP_INT_LEVEL_ONBOARD | 14)
+#define SUN4M_PROFILE_IRQ       (OBP_INT_LEVEL_ONBOARD | 14)
 
-static unsigned long irq_mask[0x50] = {
+static unsigned long sun4m_imask[0x50] = {
        /* 0x00 - SMP */
        0,  SUN4M_SOFT_INT(1),
        SUN4M_SOFT_INT(2),  SUN4M_SOFT_INT(3),
@@ -169,7 +174,7 @@ static unsigned long irq_mask[0x50] = {
        SUN4M_INT_VIDEO, SUN4M_INT_MODULE,
        SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY,
        (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),
-       SUN4M_INT_AUDIO, 0, SUN4M_INT_MODULE_ERR,
+       SUN4M_INT_AUDIO, SUN4M_INT_E14, SUN4M_INT_MODULE_ERR,
        /* 0x30 - sbus */
        0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1),
        0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3),
@@ -182,105 +187,110 @@ static unsigned long irq_mask[0x50] = {
        0, SUN4M_INT_VME(6), 0, 0
 };
 
-static unsigned long sun4m_get_irqmask(unsigned int irq)
+static void sun4m_mask_irq(struct irq_data *data)
 {
-       unsigned long mask;
-
-       if (irq < 0x50)
-               mask = irq_mask[irq];
-       else
-               mask = 0;
+       struct sun4m_handler_data *handler_data = data->handler_data;
+       int cpu = smp_processor_id();
 
-       if (!mask)
-               printk(KERN_ERR "sun4m_get_irqmask: IRQ%d has no valid mask!\n",
-                      irq);
+       if (handler_data->mask) {
+               unsigned long flags;
 
-       return mask;
+               local_irq_save(flags);
+               if (handler_data->percpu) {
+                       sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set);
+               } else {
+                       sbus_writel(handler_data->mask, &sun4m_irq_global->mask_set);
+               }
+               local_irq_restore(flags);
+       }
 }
 
-static void sun4m_disable_irq(unsigned int irq_nr)
+static void sun4m_unmask_irq(struct irq_data *data)
 {
-       unsigned long mask, flags;
+       struct sun4m_handler_data *handler_data = data->handler_data;
        int cpu = smp_processor_id();
 
-       mask = sun4m_get_irqmask(irq_nr);
-       local_irq_save(flags);
-       if (irq_nr > 15)
-               sbus_writel(mask, &sun4m_irq_global->mask_set);
-       else
-               sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
-       local_irq_restore(flags);
-}
-
-static void sun4m_enable_irq(unsigned int irq_nr)
-{
-       unsigned long mask, flags;
-       int cpu = smp_processor_id();
+       if (handler_data->mask) {
+               unsigned long flags;
 
-       /* Dreadful floppy hack. When we use 0x2b instead of
-        * 0x0b the system blows (it starts to whistle!).
-        * So we continue to use 0x0b. Fixme ASAP. --P3
-        */
-       if (irq_nr != 0x0b) {
-               mask = sun4m_get_irqmask(irq_nr);
-               local_irq_save(flags);
-               if (irq_nr > 15)
-                       sbus_writel(mask, &sun4m_irq_global->mask_clear);
-               else
-                       sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
-               local_irq_restore(flags);
-       } else {
                local_irq_save(flags);
-               sbus_writel(SUN4M_INT_FLOPPY, &sun4m_irq_global->mask_clear);
+               if (handler_data->percpu) {
+                       sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear);
+               } else {
+                       sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear);
+               }
                local_irq_restore(flags);
        }
 }
 
-static unsigned long cpu_pil_to_imask[16] = {
-/*0*/  0x00000000,
-/*1*/  0x00000000,
-/*2*/  SUN4M_INT_SBUS(0) | SUN4M_INT_VME(0),
-/*3*/  SUN4M_INT_SBUS(1) | SUN4M_INT_VME(1),
-/*4*/  SUN4M_INT_SCSI,
-/*5*/  SUN4M_INT_SBUS(2) | SUN4M_INT_VME(2),
-/*6*/  SUN4M_INT_ETHERNET,
-/*7*/  SUN4M_INT_SBUS(3) | SUN4M_INT_VME(3),
-/*8*/  SUN4M_INT_VIDEO,
-/*9*/  SUN4M_INT_SBUS(4) | SUN4M_INT_VME(4) | SUN4M_INT_MODULE_ERR,
-/*10*/ SUN4M_INT_REALTIME,
-/*11*/ SUN4M_INT_SBUS(5) | SUN4M_INT_VME(5) | SUN4M_INT_FLOPPY,
-/*12*/ SUN4M_INT_SERIAL  | SUN4M_INT_KBDMS,
-/*13*/ SUN4M_INT_SBUS(6) | SUN4M_INT_VME(6) | SUN4M_INT_AUDIO,
-/*14*/ SUN4M_INT_E14,
-/*15*/ SUN4M_INT_ERROR,
-};
+static unsigned int sun4m_startup_irq(struct irq_data *data)
+{
+       irq_link(data->irq);
+       sun4m_unmask_irq(data);
+       return 0;
+}
 
-/* We assume the caller has disabled local interrupts when these are called,
- * or else very bizarre behavior will result.
- */
-static void sun4m_disable_pil_irq(unsigned int pil)
+static void sun4m_shutdown_irq(struct irq_data *data)
 {
-       sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_set);
+       sun4m_mask_irq(data);
+       irq_unlink(data->irq);
 }
 
-static void sun4m_enable_pil_irq(unsigned int pil)
+static struct irq_chip sun4m_irq = {
+       .name           = "sun4m",
+       .irq_startup    = sun4m_startup_irq,
+       .irq_shutdown   = sun4m_shutdown_irq,
+       .irq_mask       = sun4m_mask_irq,
+       .irq_unmask     = sun4m_unmask_irq,
+};
+
+
+static unsigned int sun4m_build_device_irq(struct platform_device *op,
+                                          unsigned int real_irq)
 {
-       sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_clear);
+       struct sun4m_handler_data *handler_data;
+       unsigned int irq;
+       unsigned int pil;
+
+       if (real_irq >= OBP_INT_LEVEL_VME) {
+               prom_printf("Bogus sun4m IRQ %u\n", real_irq);
+               prom_halt();
+       }
+       pil = (real_irq & 0xf);
+       irq = irq_alloc(real_irq, pil);
+
+       if (irq == 0)
+               goto out;
+
+       handler_data = irq_get_handler_data(irq);
+       if (unlikely(handler_data))
+               goto out;
+
+       handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC);
+       if (unlikely(!handler_data)) {
+               prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n");
+               prom_halt();
+       }
+
+       handler_data->mask = sun4m_imask[real_irq];
+       handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD;
+       irq_set_chip_and_handler_name(irq, &sun4m_irq,
+                                     handle_level_irq, "level");
+       irq_set_handler_data(irq, handler_data);
+
+out:
+       return irq;
 }
 
 #ifdef CONFIG_SMP
 static void sun4m_send_ipi(int cpu, int level)
 {
-       unsigned long mask = sun4m_get_irqmask(level);
-
-       sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
+       sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
 }
 
 static void sun4m_clear_ipi(int cpu, int level)
 {
-       unsigned long mask = sun4m_get_irqmask(level);
-
-       sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
+       sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->clear);
 }
 
 static void sun4m_set_udt(int cpu)
@@ -343,7 +353,15 @@ void sun4m_nmi(struct pt_regs *regs)
        prom_halt();
 }
 
-/* Exported for sun4m_smp.c */
+void sun4m_unmask_profile_irq(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       sbus_writel(sun4m_imask[SUN4M_PROFILE_IRQ], &sun4m_irq_global->mask_clear);
+       local_irq_restore(flags);
+}
+
 void sun4m_clear_profile_irq(int cpu)
 {
        sbus_readl(&timers_percpu[cpu]->l14_limit);
@@ -358,6 +376,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
 {
        struct device_node *dp = of_find_node_by_name(NULL, "counter");
        int i, err, len, num_cpu_timers;
+       unsigned int irq;
        const u32 *addr;
 
        if (!dp) {
@@ -384,8 +403,9 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
 
        master_l10_counter = &timers_global->l10_count;
 
-       err = request_irq(SUN4M_TIMER_IRQ, counter_fn,
-                         (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
+       irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ);
+
+       err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
        if (err) {
                printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
                        err);
@@ -452,14 +472,11 @@ void __init sun4m_init_IRQ(void)
        if (num_cpu_iregs == 4)
                sbus_writel(0, &sun4m_irq_global->interrupt_target);
 
-       BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
 
        sparc_irq_config.init_timers = sun4m_init_timers;
+       sparc_irq_config.build_device_irq = sun4m_build_device_irq;
 
 #ifdef CONFIG_SMP
        BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
index 5cc7dc51de3d917c0d267d5ea79df83107e195f2..594768686525a9770b72ea09ef3aef19138ae29d 100644 (file)
@@ -15,6 +15,9 @@
 #include "irq.h"
 #include "kernel.h"
 
+#define IRQ_IPI_SINGLE         12
+#define IRQ_IPI_MASK           13
+#define IRQ_IPI_RESCHED                14
 #define IRQ_CROSS_CALL         15
 
 static inline unsigned long
@@ -26,6 +29,7 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
        return val;
 }
 
+static void smp4m_ipi_init(void);
 static void smp_setup_percpu_timer(void);
 
 void __cpuinit smp4m_callin(void)
@@ -59,8 +63,6 @@ void __cpuinit smp4m_callin(void)
        local_flush_cache_all();
        local_flush_tlb_all();
 
-       cpu_probe();
-
        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
@@ -70,7 +72,7 @@ void __cpuinit smp4m_callin(void)
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
 
-       while (!cpu_isset(cpuid, smp_commenced_mask))
+       while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                mb();
 
        local_irq_enable();
@@ -83,6 +85,7 @@ void __cpuinit smp4m_callin(void)
  */
 void __init smp4m_boot_cpus(void)
 {
+       smp4m_ipi_init();
        smp_setup_percpu_timer();
        local_flush_cache_all();
 }
@@ -150,18 +153,25 @@ void __init smp4m_smp_done(void)
        /* Ok, they are spinning and ready to go. */
 }
 
-/* At each hardware IRQ, we get this called to forward IRQ reception
- * to the next processor.  The caller must disable the IRQ level being
- * serviced globally so that there are no double interrupts received.
- *
- * XXX See sparc64 irq.c.
- */
-void smp4m_irq_rotate(int cpu)
+
+/* Initialize IPIs on the SUN4M SMP machine */
+static void __init smp4m_ipi_init(void)
+{
+}
+
+static void smp4m_ipi_resched(int cpu)
+{
+       set_cpu_int(cpu, IRQ_IPI_RESCHED);
+}
+
+static void smp4m_ipi_single(int cpu)
 {
-       int next = cpu_data(cpu).next;
+       set_cpu_int(cpu, IRQ_IPI_SINGLE);
+}
 
-       if (next != cpu)
-               set_irq_udt(next);
+static void smp4m_ipi_mask_one(int cpu)
+{
+       set_cpu_int(cpu, IRQ_IPI_MASK);
 }
 
 static struct smp_funcall {
@@ -199,10 +209,10 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
                {
                        register int i;
 
-                       cpu_clear(smp_processor_id(), mask);
-                       cpus_and(mask, cpu_online_map, mask);
+                       cpumask_clear_cpu(smp_processor_id(), &mask);
+                       cpumask_and(&mask, cpu_online_mask, &mask);
                        for (i = 0; i < ncpus; i++) {
-                               if (cpu_isset(i, mask)) {
+                               if (cpumask_test_cpu(i, &mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        set_cpu_int(i, IRQ_CROSS_CALL);
@@ -218,7 +228,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
                        i = 0;
                        do {
-                               if (!cpu_isset(i, mask))
+                               if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_in[i])
                                        barrier();
@@ -226,7 +236,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 
                        i = 0;
                        do {
-                               if (!cpu_isset(i, mask))
+                               if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_out[i])
                                        barrier();
@@ -277,7 +287,7 @@ static void __cpuinit smp_setup_percpu_timer(void)
        load_profile_irq(cpu, lvl14_resolution);
 
        if (cpu == boot_cpu_id)
-               enable_pil_irq(14);
+               sun4m_unmask_profile_irq();
 }
 
 static void __init smp4m_blackbox_id(unsigned *addr)
@@ -306,4 +316,7 @@ void __init sun4m_init_smp(void)
        BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_resched, smp4m_ipi_resched, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_single, smp4m_ipi_single, BTFIXUPCALL_NORM);
+       BTFIXUPSET_CALL(smp_ipi_mask_one, smp4m_ipi_mask_one, BTFIXUPCALL_NORM);
 }
index 1eb8b00aed75f293b5df18076e1fc3b81b2cf9c7..7408201d7efb5c44f58918ca6f32e10a567d8e6a 100644 (file)
@@ -103,9 +103,10 @@ static unsigned long run_on_cpu(unsigned long cpu,
                                unsigned long (*func)(unsigned long),
                                unsigned long arg)
 {
-       cpumask_t old_affinity = current->cpus_allowed;
+       cpumask_t old_affinity;
        unsigned long ret;
 
+       cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
        /* should return -EINVAL to userspace */
        if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
                return 0;
index 96046a4024c23276e41769403a5155ec62ea2cd3..1060e0672a4b439972fbfa0c728b6ba6403a777d 100644 (file)
@@ -228,14 +228,10 @@ static void __init sbus_time_init(void)
 
 void __init time_init(void)
 {
-#ifdef CONFIG_PCI
-       extern void pci_time_init(void);
-       if (pcic_present()) {
+       if (pcic_present())
                pci_time_init();
-               return;
-       }
-#endif
-       sbus_time_init();
+       else
+               sbus_time_init();
 }
 
 
index 8f982b76c71259e7a28f1f1551629eb397023a31..531d54fc9829271efb36c572a9b32129fddded59 100644 (file)
@@ -237,7 +237,7 @@ static unsigned int us2e_freq_get(unsigned int cpu)
        if (!cpu_online(cpu))
                return 0;
 
-       cpus_allowed = current->cpus_allowed;
+       cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
        clock_tick = sparc64_get_clock_tick(cpu) / 1000;
@@ -258,7 +258,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
        if (!cpu_online(cpu))
                return;
 
-       cpus_allowed = current->cpus_allowed;
+       cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
        new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
index f35d1e794548eb8541bee5198f4a353f346d1e3e..9a8ceb7008330a9fb695c1ddb2d2377cb15bb68f 100644 (file)
@@ -85,7 +85,7 @@ static unsigned int us3_freq_get(unsigned int cpu)
        if (!cpu_online(cpu))
                return 0;
 
-       cpus_allowed = current->cpus_allowed;
+       cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
        reg = read_safari_cfg();
@@ -105,7 +105,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
        if (!cpu_online(cpu))
                return;
 
-       cpus_allowed = current->cpus_allowed;
+       cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
        new_freq = sparc64_get_clock_tick(cpu) / 1000;
index 846d1c4374ea9c0c24a64bc4088185388274afd3..7f01b8fce8bcff65e77709806157a28b854ae51b 100644 (file)
@@ -15,7 +15,6 @@ lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-y                 += atomic_$(BITS).o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
-lib-$(CONFIG_SPARC32) += rwsem_32.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
diff --git a/arch/sparc/lib/rwsem_32.S b/arch/sparc/lib/rwsem_32.S
deleted file mode 100644 (file)
index 9675268..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Assembly part of rw semaphores.
- *
- * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
- */
-
-#include <asm/ptrace.h>
-#include <asm/psr.h>
-
-       .section .sched.text, "ax"
-       .align  4
-
-       .globl          ___down_read
-___down_read:
-       rd              %psr, %g3
-       nop
-       nop
-       nop
-       or              %g3, PSR_PIL, %g7
-       wr              %g7, 0, %psr
-       nop
-       nop
-       nop
-#ifdef CONFIG_SMP
-1:     ldstub          [%g1 + 4], %g7
-       tst             %g7
-       bne             1b
-        ld             [%g1], %g7
-       sub             %g7, 1, %g7
-       st              %g7, [%g1]
-       stb             %g0, [%g1 + 4]
-#else
-       ld              [%g1], %g7
-       sub             %g7, 1, %g7
-       st              %g7, [%g1]
-#endif
-       wr              %g3, 0, %psr
-       add             %g7, 1, %g7
-       nop
-       nop
-       subcc           %g7, 1, %g7
-       bneg            3f
-        nop
-2:     jmpl            %o7, %g0
-        mov            %g4, %o7
-3:     save            %sp, -64, %sp
-       mov             %g1, %l1
-       mov             %g4, %l4
-       bcs             4f
-        mov            %g5, %l5
-       call            down_read_failed
-        mov            %l1, %o0
-       mov             %l1, %g1
-       mov             %l4, %g4
-       ba              ___down_read
-        restore        %l5, %g0, %g5
-4:     call            down_read_failed_biased
-        mov            %l1, %o0
-       mov             %l1, %g1
-       mov             %l4, %g4
-       ba              2b
-        restore        %l5, %g0, %g5
-
-       .globl          ___down_write
-___down_write:
-       rd              %psr, %g3
-       nop
-       nop
-       nop
-       or              %g3, PSR_PIL, %g7
-       wr              %g7, 0, %psr
-       sethi           %hi(0x01000000), %g2
-       nop
-       nop
-#ifdef CONFIG_SMP
-1:     ldstub          [%g1 + 4], %g7
-       tst             %g7
-       bne             1b
-        ld             [%g1], %g7
-       sub             %g7, %g2, %g7
-       st              %g7, [%g1]
-       stb             %g0, [%g1 + 4]
-#else
-       ld              [%g1], %g7
-       sub             %g7, %g2, %g7
-       st              %g7, [%g1]
-#endif
-       wr              %g3, 0, %psr
-       add             %g7, %g2, %g7
-       nop
-       nop
-       subcc           %g7, %g2, %g7
-       bne             3f
-        nop
-2:     jmpl            %o7, %g0
-        mov            %g4, %o7
-3:     save            %sp, -64, %sp
-       mov             %g1, %l1
-       mov             %g4, %l4
-       bcs             4f
-        mov            %g5, %l5
-       call            down_write_failed
-        mov            %l1, %o0
-       mov             %l1, %g1
-       mov             %l4, %g4
-       ba              ___down_write
-        restore        %l5, %g0, %g5
-4:     call            down_write_failed_biased
-        mov            %l1, %o0
-       mov             %l1, %g1
-       mov             %l4, %g4
-       ba              2b
-        restore        %l5, %g0, %g5
-
-       .text
-       .globl          ___up_read
-___up_read:
-       rd              %psr, %g3
-       nop
-       nop
-       nop
-       or              %g3, PSR_PIL, %g7
-       wr              %g7, 0, %psr
-       nop
-       nop
-       nop
-#ifdef CONFIG_SMP
-1:     ldstub          [%g1 + 4], %g7
-       tst             %g7
-       bne             1b
-        ld             [%g1], %g7
-       add             %g7, 1, %g7
-       st              %g7, [%g1]
-       stb             %g0, [%g1 + 4]
-#else
-       ld              [%g1], %g7
-       add             %g7, 1, %g7
-       st              %g7, [%g1]
-#endif
-       wr              %g3, 0, %psr
-       nop
-       nop
-       nop
-       cmp             %g7, 0
-       be              3f
-        nop
-2:     jmpl            %o7, %g0
-        mov            %g4, %o7
-3:     save            %sp, -64, %sp
-       mov             %g1, %l1
-       mov             %g4, %l4
-       mov             %g5, %l5
-       clr             %o1
-       call            __rwsem_wake
-        mov            %l1, %o0
-       mov             %l1, %g1
-       mov             %l4, %g4
-       ba              2b
-        restore        %l5, %g0, %g5
-
-       .globl          ___up_write
-___up_write:
-       rd              %psr, %g3
-       nop
-       nop
-       nop
-       or              %g3, PSR_PIL, %g7
-       wr              %g7, 0, %psr
-       sethi           %hi(0x01000000), %g2
-       nop
-       nop
-#ifdef CONFIG_SMP
-1:     ldstub          [%g1 + 4], %g7
-       tst             %g7
-       bne             1b
-        ld             [%g1], %g7
-       add             %g7, %g2, %g7
-       st              %g7, [%g1]
-       stb             %g0, [%g1 + 4]
-#else
-       ld              [%g1], %g7
-       add             %g7, %g2, %g7
-       st              %g7, [%g1]
-#endif
-       wr              %g3, 0, %psr
-       sub             %g7, %g2, %g7
-       nop
-       nop
-       addcc           %g7, %g2, %g7
-       bcs             3f
-        nop
-2:     jmpl            %o7, %g0
-        mov            %g4, %o7
-3:     save            %sp, -64, %sp
-       mov             %g1, %l1
-       mov             %g4, %l4
-       mov             %g5, %l5
-       mov             %g7, %o1
-       call            __rwsem_wake
-        mov            %l1, %o0
-       mov             %l1, %g1
-       mov             %l4, %g4
-       ba              2b
-        restore        %l5, %g0, %g5
index 2f6ae1d1fb6b19c3475efec961812c991e417a79..e10cd03fab801648ef2e5da6361a0a23867560aa 100644 (file)
@@ -862,7 +862,7 @@ static void init_node_masks_nonnuma(void)
        for (i = 0; i < NR_CPUS; i++)
                numa_cpu_lookup_table[i] = 0;
 
-       numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
+       cpumask_setall(&numa_cpumask_lookup_table[0]);
 }
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -1080,7 +1080,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
 {
        u64 arc;
 
-       cpus_clear(*mask);
+       cpumask_clear(mask);
 
        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
                u64 target = mdesc_arc_target(md, arc);
@@ -1091,7 +1091,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
                        continue;
                id = mdesc_get_property(md, target, "id", NULL);
                if (*id < nr_cpu_ids)
-                       cpu_set(*id, *mask);
+                       cpumask_set_cpu(*id, mask);
        }
 }
 
@@ -1153,13 +1153,13 @@ static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
 
        numa_parse_mdesc_group_cpus(md, grp, &mask);
 
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu(cpu, &mask)
                numa_cpu_lookup_table[cpu] = index;
-       numa_cpumask_lookup_table[index] = mask;
+       cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
 
        if (numa_debug) {
                printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
-               for_each_cpu_mask(cpu, mask)
+               for_each_cpu(cpu, &mask)
                        printk("%d ", cpu);
                printk("]\n");
        }
@@ -1218,7 +1218,7 @@ static int __init numa_parse_jbus(void)
        index = 0;
        for_each_present_cpu(cpu) {
                numa_cpu_lookup_table[cpu] = index;
-               numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
+               cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
                node_masks[index].mask = ~((1UL << 36UL) - 1UL);
                node_masks[index].val = cpu << 36UL;
 
index cbc70a27430ce3dc7010fbf07909ed886674c5e0..c8b41623377f14462a248b245ffab97792e9409c 100644 (file)
@@ -254,7 +254,7 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
-static int disable_smep __initdata;
+static int disable_smep __cpuinitdata;
 static __init int setup_disable_smep(char *arg)
 {
        disable_smep = 1;
@@ -262,7 +262,7 @@ static __init int setup_disable_smep(char *arg)
 }
 __setup("nosmep", setup_disable_smep);
 
-static __init void setup_smep(struct cpuinfo_x86 *c)
+static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
 {
        if (cpu_has(c, X86_FEATURE_SMEP)) {
                if (unlikely(disable_smep)) {
index b2699bb2e5303472d5e67e72086ac517195eeab7..d871b14ed5a186a22d758308f1a498fbfe66de26 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/genhd.h>
 #include <net/tcp.h>
 #include <linux/lru_cache.h>
+#include <linux/prefetch.h>
 
 #ifdef __CHECKER__
 # define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
index c9213ead4a263cd12da99cfbb4a2366233a27141..a4d6cb0c0343a32c166f3003f65378e5a31da7f1 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
+#include <linux/prefetch.h>
 #include <linux/i7300_idle.h>
 #include "dma.h"
 #include "registers.h"
index effd140fc042b827617bebce013dae190c7b4d76..f4a51d4d0349f95800b84ff85839c0f6869036a1 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
+#include <linux/prefetch.h>
 #include <linux/i7300_idle.h>
 #include "dma.h"
 #include "dma_v2.h"
index d0f4990984793792e9cf5647e252d2a2dfe04a1b..d845dc4b71039a88787cf6cd1836c189772d3e23 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/gfp.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
 #include "registers.h"
 #include "hw.h"
 #include "dma.h"
index 3c44fbc81acba5758a38b77169dfe566c9b1cfd5..29d2423fae6d999827c682648278366c07238a6e 100644 (file)
@@ -228,8 +228,8 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
 
        /* Use an arbitrary short delay to combine multiple reset requests. */
        fw_card_get(card);
-       if (!schedule_delayed_work(&card->br_work,
-                                  delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+       if (!queue_delayed_work(fw_workqueue, &card->br_work,
+                               delayed ? DIV_ROUND_UP(HZ, 100) : 0))
                fw_card_put(card);
 }
 EXPORT_SYMBOL(fw_schedule_bus_reset);
@@ -241,7 +241,7 @@ static void br_work(struct work_struct *work)
        /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
        if (card->reset_jiffies != 0 &&
            time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
-               if (!schedule_delayed_work(&card->br_work, 2 * HZ))
+               if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
                        fw_card_put(card);
                return;
        }
@@ -258,8 +258,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
 
        if (!card->broadcast_channel_allocated) {
                fw_iso_resource_manage(card, generation, 1ULL << 31,
-                                      &channel, &bandwidth, true,
-                                      card->bm_transaction_data);
+                                      &channel, &bandwidth, true);
                if (channel != 31) {
                        fw_notify("failed to allocate broadcast channel\n");
                        return;
@@ -294,6 +293,7 @@ static void bm_work(struct work_struct *work)
        bool root_device_is_cmc;
        bool irm_is_1394_1995_only;
        bool keep_this_irm;
+       __be32 transaction_data[2];
 
        spin_lock_irq(&card->lock);
 
@@ -355,21 +355,21 @@ static void bm_work(struct work_struct *work)
                        goto pick_me;
                }
 
-               card->bm_transaction_data[0] = cpu_to_be32(0x3f);
-               card->bm_transaction_data[1] = cpu_to_be32(local_id);
+               transaction_data[0] = cpu_to_be32(0x3f);
+               transaction_data[1] = cpu_to_be32(local_id);
 
                spin_unlock_irq(&card->lock);
 
                rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-                               card->bm_transaction_data, 8);
+                               transaction_data, 8);
 
                if (rcode == RCODE_GENERATION)
                        /* Another bus reset, BM work has been rescheduled. */
                        goto out;
 
-               bm_id = be32_to_cpu(card->bm_transaction_data[0]);
+               bm_id = be32_to_cpu(transaction_data[0]);
 
                spin_lock_irq(&card->lock);
                if (rcode == RCODE_COMPLETE && generation == card->generation)
@@ -490,11 +490,11 @@ static void bm_work(struct work_struct *work)
                /*
                 * Make sure that the cycle master sends cycle start packets.
                 */
-               card->bm_transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+               transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
                rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
                                root_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_STATE_SET,
-                               card->bm_transaction_data, 4);
+                               transaction_data, 4);
                if (rcode == RCODE_GENERATION)
                        goto out;
        }
@@ -630,6 +630,10 @@ static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
        return -ENODEV;
 }
 
+static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
+{
+}
+
 static const struct fw_card_driver dummy_driver_template = {
        .read_phy_reg           = dummy_read_phy_reg,
        .update_phy_reg         = dummy_update_phy_reg,
@@ -641,6 +645,7 @@ static const struct fw_card_driver dummy_driver_template = {
        .start_iso              = dummy_start_iso,
        .set_iso_channels       = dummy_set_iso_channels,
        .queue_iso              = dummy_queue_iso,
+       .flush_queue_iso        = dummy_flush_queue_iso,
 };
 
 void fw_card_release(struct kref *kref)
index 62ac111af2432d4f8f0c3216be5bf724343e4ea8..b1c11775839cd6543590d98a2d482dd3b9ef769c 100644 (file)
@@ -141,7 +141,6 @@ struct iso_resource {
        int generation;
        u64 channels;
        s32 bandwidth;
-       __be32 transaction_data[2];
        struct iso_resource_event *e_alloc, *e_dealloc;
 };
 
@@ -150,7 +149,7 @@ static void release_iso_resource(struct client *, struct client_resource *);
 static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
 {
        client_get(r->client);
-       if (!schedule_delayed_work(&r->work, delay))
+       if (!queue_delayed_work(fw_workqueue, &r->work, delay))
                client_put(r->client);
 }
 
@@ -1108,6 +1107,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
                payload += u.packet.payload_length;
                count++;
        }
+       fw_iso_context_queue_flush(ctx);
 
        a->size    -= uptr_to_u64(p) - a->packets;
        a->packets  = uptr_to_u64(p);
@@ -1229,8 +1229,7 @@ static void iso_resource_work(struct work_struct *work)
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
-                       todo == ISO_RES_ALLOC_ONCE,
-                       r->transaction_data);
+                       todo == ISO_RES_ALLOC_ONCE);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
index 9a262439e3a7220aa8bd4712acb73dc99b63fb62..95a4714018922b52ebf484897fe7bcbd106eb318 100644 (file)
@@ -725,6 +725,15 @@ struct fw_device *fw_device_get_by_devt(dev_t devt)
        return device;
 }
 
+struct workqueue_struct *fw_workqueue;
+EXPORT_SYMBOL(fw_workqueue);
+
+static void fw_schedule_device_work(struct fw_device *device,
+                                   unsigned long delay)
+{
+       queue_delayed_work(fw_workqueue, &device->work, delay);
+}
+
 /*
  * These defines control the retry behavior for reading the config
  * rom.  It shouldn't be necessary to tweak these; if the device
@@ -750,7 +759,7 @@ static void fw_device_shutdown(struct work_struct *work)
        if (time_before64(get_jiffies_64(),
                          device->card->reset_jiffies + SHUTDOWN_DELAY)
            && !list_empty(&device->card->link)) {
-               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+               fw_schedule_device_work(device, SHUTDOWN_DELAY);
                return;
        }
 
@@ -862,7 +871,7 @@ static int lookup_existing_device(struct device *dev, void *data)
                fw_notify("rediscovered device %s\n", dev_name(dev));
 
                PREPARE_DELAYED_WORK(&old->work, fw_device_update);
-               schedule_delayed_work(&old->work, 0);
+               fw_schedule_device_work(old, 0);
 
                if (current_node == card->root_node)
                        fw_schedule_bm_work(card, 0);
@@ -953,7 +962,7 @@ static void fw_device_init(struct work_struct *work)
                if (device->config_rom_retries < MAX_RETRIES &&
                    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
                        device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY);
+                       fw_schedule_device_work(device, RETRY_DELAY);
                } else {
                        if (device->node->link_on)
                                fw_notify("giving up on config rom for node id %x\n",
@@ -1019,7 +1028,7 @@ static void fw_device_init(struct work_struct *work)
                           FW_DEVICE_INITIALIZING,
                           FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
                PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+               fw_schedule_device_work(device, SHUTDOWN_DELAY);
        } else {
                if (device->config_rom_retries)
                        fw_notify("created device %s: GUID %08x%08x, S%d00, "
@@ -1098,7 +1107,7 @@ static void fw_device_refresh(struct work_struct *work)
                if (device->config_rom_retries < MAX_RETRIES / 2 &&
                    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
                        device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY / 2);
+                       fw_schedule_device_work(device, RETRY_DELAY / 2);
 
                        return;
                }
@@ -1131,7 +1140,7 @@ static void fw_device_refresh(struct work_struct *work)
                if (device->config_rom_retries < MAX_RETRIES &&
                    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
                        device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY);
+                       fw_schedule_device_work(device, RETRY_DELAY);
 
                        return;
                }
@@ -1158,7 +1167,7 @@ static void fw_device_refresh(struct work_struct *work)
  gone:
        atomic_set(&device->state, FW_DEVICE_GONE);
        PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-       schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+       fw_schedule_device_work(device, SHUTDOWN_DELAY);
  out:
        if (node_id == card->root_node->node_id)
                fw_schedule_bm_work(card, 0);
@@ -1214,7 +1223,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                 * first config rom scan half a second after bus reset.
                 */
                INIT_DELAYED_WORK(&device->work, fw_device_init);
-               schedule_delayed_work(&device->work, INITIAL_DELAY);
+               fw_schedule_device_work(device, INITIAL_DELAY);
                break;
 
        case FW_NODE_INITIATED_RESET:
@@ -1230,7 +1239,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                            FW_DEVICE_RUNNING,
                            FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
                        PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
-                       schedule_delayed_work(&device->work,
+                       fw_schedule_device_work(device,
                                device->is_local ? 0 : INITIAL_DELAY);
                }
                break;
@@ -1245,7 +1254,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                device->generation = card->generation;
                if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
                        PREPARE_DELAYED_WORK(&device->work, fw_device_update);
-                       schedule_delayed_work(&device->work, 0);
+                       fw_schedule_device_work(device, 0);
                }
                break;
 
@@ -1270,7 +1279,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                if (atomic_xchg(&device->state,
                                FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
                        PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-                       schedule_delayed_work(&device->work,
+                       fw_schedule_device_work(device,
                                list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
                }
                break;
index 481056df9268db5cb51979c83a337715c7c6fb42..57c3973093ad13ab49355f6af6e9c38f05a5bcae 100644 (file)
@@ -185,6 +185,12 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
 }
 EXPORT_SYMBOL(fw_iso_context_queue);
 
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
+{
+       ctx->card->driver->flush_queue_iso(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_queue_flush);
+
 int fw_iso_context_stop(struct fw_iso_context *ctx)
 {
        return ctx->card->driver->stop_iso(ctx);
@@ -196,9 +202,10 @@ EXPORT_SYMBOL(fw_iso_context_stop);
  */
 
 static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-                           int bandwidth, bool allocate, __be32 data[2])
+                           int bandwidth, bool allocate)
 {
        int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+       __be32 data[2];
 
        /*
         * On a 1394a IRM with low contention, try < 1 is enough.
@@ -233,9 +240,10 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 }
 
 static int manage_channel(struct fw_card *card, int irm_id, int generation,
-               u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
+               u32 channels_mask, u64 offset, bool allocate)
 {
        __be32 bit, all, old;
+       __be32 data[2];
        int channel, ret = -EIO, retry = 5;
 
        old = all = allocate ? cpu_to_be32(~0) : 0;
@@ -284,7 +292,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
-                              int generation, int channel, __be32 buffer[2])
+                              int generation, int channel)
 {
        u32 mask;
        u64 offset;
@@ -293,7 +301,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
        offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
 
-       manage_channel(card, irm_id, generation, mask, offset, false, buffer);
+       manage_channel(card, irm_id, generation, mask, offset, false);
 }
 
 /**
@@ -322,7 +330,7 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
  */
 void fw_iso_resource_manage(struct fw_card *card, int generation,
                            u64 channels_mask, int *channel, int *bandwidth,
-                           bool allocate, __be32 buffer[2])
+                           bool allocate)
 {
        u32 channels_hi = channels_mask;        /* channels 31...0 */
        u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
@@ -335,11 +343,11 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
        if (channels_hi)
                c = manage_channel(card, irm_id, generation, channels_hi,
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
-                               allocate, buffer);
+                               allocate);
        if (channels_lo && c < 0) {
                c = manage_channel(card, irm_id, generation, channels_lo,
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
-                               allocate, buffer);
+                               allocate);
                if (c >= 0)
                        c += 32;
        }
@@ -351,14 +359,13 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
        if (*bandwidth == 0)
                return;
 
-       ret = manage_bandwidth(card, irm_id, generation, *bandwidth,
-                              allocate, buffer);
+       ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
        if (ret < 0)
                *bandwidth = 0;
 
        if (allocate && ret < 0) {
                if (c >= 0)
-                       deallocate_channel(card, irm_id, generation, c, buffer);
+                       deallocate_channel(card, irm_id, generation, c);
                *channel = ret;
        }
 }
index d00f8ce902cc429914c31644a213531d6a239706..334b82a3542c18ddbc4c138deafdf1931068434d 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -326,8 +327,8 @@ static int allocate_tlabel(struct fw_card *card)
  * It will contain tag, channel, and sy data instead of a node ID then.
  *
  * The payload buffer at @data is going to be DMA-mapped except in case of
- * quadlet-sized payload or of local (loopback) requests.  Hence make sure that
- * the buffer complies with the restrictions for DMA-mapped memory.  The
+ * @length <= 8 or of local (loopback) requests.  Hence make sure that the
+ * buffer complies with the restrictions of the streaming DMA mapping API.
  * @payload must not be freed before the @callback is called.
  *
  * In case of request types without payload, @data is NULL and @length is 0.
@@ -411,7 +412,8 @@ static void transaction_callback(struct fw_card *card, int rcode,
  *
  * Returns the RCODE.  See fw_send_request() for parameter documentation.
  * Unlike fw_send_request(), @data points to the payload of the request or/and
- * to the payload of the response.
+ * to the payload of the response.  DMA mapping restrictions apply to outbound
+ * request payloads of >= 8 bytes but not to inbound response payloads.
  */
 int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
                       int generation, int speed, unsigned long long offset,
@@ -1212,13 +1214,21 @@ static int __init fw_core_init(void)
 {
        int ret;
 
+       fw_workqueue = alloc_workqueue("firewire",
+                                      WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+       if (!fw_workqueue)
+               return -ENOMEM;
+
        ret = bus_register(&fw_bus_type);
-       if (ret < 0)
+       if (ret < 0) {
+               destroy_workqueue(fw_workqueue);
                return ret;
+       }
 
        fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
        if (fw_cdev_major < 0) {
                bus_unregister(&fw_bus_type);
+               destroy_workqueue(fw_workqueue);
                return fw_cdev_major;
        }
 
@@ -1234,6 +1244,7 @@ static void __exit fw_core_cleanup(void)
 {
        unregister_chrdev(fw_cdev_major, "firewire");
        bus_unregister(&fw_bus_type);
+       destroy_workqueue(fw_workqueue);
        idr_destroy(&fw_device_idr);
 }
 
index 25e729cde2f74262b6caae39a4f51e6138e64b66..0fe4e4e6eda72a696596c7dbcce1a7f0f6538569 100644 (file)
@@ -97,6 +97,8 @@ struct fw_card_driver {
                         struct fw_iso_buffer *buffer,
                         unsigned long payload);
 
+       void (*flush_queue_iso)(struct fw_iso_context *ctx);
+
        int (*stop_iso)(struct fw_iso_context *ctx);
 };
 
index 3f04dd3681cf7698bf5b8f377db07e84fb8c726c..b9762d07198d3a6fb4383ebc3fcc60c17c4a17c0 100644 (file)
@@ -881,7 +881,9 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
        spin_unlock_irqrestore(&dev->lock, flags);
 
-       if (retval < 0)
+       if (retval >= 0)
+               fw_iso_context_queue_flush(dev->broadcast_rcv_context);
+       else
                fw_error("requeue failed\n");
 }
 
index 23d1468ad253d54c9c0745a858b6b155123e76e7..438e6c83117087d8d10c13418ff3a68edf9314ff 100644 (file)
@@ -1006,13 +1006,12 @@ static void ar_context_run(struct ar_context *ctx)
 
 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
 {
-       int b, key;
+       __le16 branch;
 
-       b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
-       key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
+       branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
 
        /* figure out which descriptor the branch address goes in */
-       if (z == 2 && (b == 3 || key == 2))
+       if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
                return d;
        else
                return d + z - 1;
@@ -1193,9 +1192,6 @@ static void context_append(struct context *ctx,
        wmb(); /* finish init of new descriptors before branch_address update */
        ctx->prev->branch_address = cpu_to_le32(d_bus | z);
        ctx->prev = find_branch_descriptor(d, z);
-
-       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
-       flush_writes(ctx->ohci);
 }
 
 static void context_stop(struct context *ctx)
@@ -1218,6 +1214,7 @@ static void context_stop(struct context *ctx)
 }
 
 struct driver_data {
+       u8 inline_data[8];
        struct fw_packet *packet;
 };
 
@@ -1301,20 +1298,28 @@ static int at_context_queue_packet(struct context *ctx,
                return -1;
        }
 
+       BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
        driver_data = (struct driver_data *) &d[3];
        driver_data->packet = packet;
        packet->driver_data = driver_data;
 
        if (packet->payload_length > 0) {
-               payload_bus =
-                       dma_map_single(ohci->card.device, packet->payload,
-                                      packet->payload_length, DMA_TO_DEVICE);
-               if (dma_mapping_error(ohci->card.device, payload_bus)) {
-                       packet->ack = RCODE_SEND_ERROR;
-                       return -1;
+               if (packet->payload_length > sizeof(driver_data->inline_data)) {
+                       payload_bus = dma_map_single(ohci->card.device,
+                                                    packet->payload,
+                                                    packet->payload_length,
+                                                    DMA_TO_DEVICE);
+                       if (dma_mapping_error(ohci->card.device, payload_bus)) {
+                               packet->ack = RCODE_SEND_ERROR;
+                               return -1;
+                       }
+                       packet->payload_bus     = payload_bus;
+                       packet->payload_mapped  = true;
+               } else {
+                       memcpy(driver_data->inline_data, packet->payload,
+                              packet->payload_length);
+                       payload_bus = d_bus + 3 * sizeof(*d);
                }
-               packet->payload_bus     = payload_bus;
-               packet->payload_mapped  = true;
 
                d[2].req_count    = cpu_to_le16(packet->payload_length);
                d[2].data_address = cpu_to_le32(payload_bus);
@@ -1340,8 +1345,12 @@ static int at_context_queue_packet(struct context *ctx,
 
        context_append(ctx, d, z, 4 - z);
 
-       if (!ctx->running)
+       if (ctx->running) {
+               reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+               flush_writes(ohci);
+       } else {
                context_run(ctx, 0);
+       }
 
        return 0;
 }
@@ -2066,8 +2075,6 @@ static int ohci_enable(struct fw_card *card,
 
        reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
        reg_write(ohci, OHCI1394_LinkControlSet,
-                 OHCI1394_LinkControl_rcvSelfID |
-                 OHCI1394_LinkControl_rcvPhyPkt |
                  OHCI1394_LinkControl_cycleTimerEnable |
                  OHCI1394_LinkControl_cycleMaster);
 
@@ -2094,9 +2101,6 @@ static int ohci_enable(struct fw_card *card,
        reg_write(ohci, OHCI1394_FairnessControl, 0);
        card->priority_budget_implemented = ohci->pri_req_max != 0;
 
-       ar_context_run(&ohci->ar_request_ctx);
-       ar_context_run(&ohci->ar_response_ctx);
-
        reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
        reg_write(ohci, OHCI1394_IntEventClear, ~0);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
@@ -2186,7 +2190,13 @@ static int ohci_enable(struct fw_card *card,
        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_linkEnable |
                  OHCI1394_HCControl_BIBimageValid);
-       flush_writes(ohci);
+
+       reg_write(ohci, OHCI1394_LinkControlSet,
+                 OHCI1394_LinkControl_rcvSelfID |
+                 OHCI1394_LinkControl_rcvPhyPkt);
+
+       ar_context_run(&ohci->ar_request_ctx);
+       ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */
 
        /* We are ready to go, reset bus to finish initialization. */
        fw_schedule_bus_reset(&ohci->card, false, true);
@@ -3112,6 +3122,15 @@ static int ohci_queue_iso(struct fw_iso_context *base,
        return ret;
 }
 
+static void ohci_flush_queue_iso(struct fw_iso_context *base)
+{
+       struct context *ctx =
+                       &container_of(base, struct iso_context, base)->context;
+
+       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+       flush_writes(ctx->ohci);
+}
+
 static const struct fw_card_driver ohci_driver = {
        .enable                 = ohci_enable,
        .read_phy_reg           = ohci_read_phy_reg,
@@ -3128,6 +3147,7 @@ static const struct fw_card_driver ohci_driver = {
        .free_iso_context       = ohci_free_iso_context,
        .set_iso_channels       = ohci_set_iso_channels,
        .queue_iso              = ohci_queue_iso,
+       .flush_queue_iso        = ohci_flush_queue_iso,
        .start_iso              = ohci_start_iso,
        .stop_iso               = ohci_stop_iso,
 };
index 77ed589b360d7dce2d7567685fa032f96f40ed54..41841a3e3f99c9acd4c4fc72c972626d8dffdcee 100644 (file)
@@ -125,9 +125,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
        ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
        ", or a combination)");
 
-/* I don't know why the SCSI stack doesn't define something like this... */
-typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
-
 static const char sbp2_driver_name[] = "sbp2";
 
 /*
@@ -261,7 +258,6 @@ struct sbp2_orb {
        struct kref kref;
        dma_addr_t request_bus;
        int rcode;
-       struct sbp2_pointer pointer;
        void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
        struct list_head link;
 };
@@ -314,7 +310,6 @@ struct sbp2_command_orb {
                u8 command_block[SBP2_MAX_CDB_SIZE];
        } request;
        struct scsi_cmnd *cmd;
-       scsi_done_fn_t done;
        struct sbp2_logical_unit *lu;
 
        struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
@@ -494,10 +489,11 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
                          int node_id, int generation, u64 offset)
 {
        struct fw_device *device = target_device(lu->tgt);
+       struct sbp2_pointer orb_pointer;
        unsigned long flags;
 
-       orb->pointer.high = 0;
-       orb->pointer.low = cpu_to_be32(orb->request_bus);
+       orb_pointer.high = 0;
+       orb_pointer.low = cpu_to_be32(orb->request_bus);
 
        spin_lock_irqsave(&device->card->lock, flags);
        list_add_tail(&orb->link, &lu->orb_list);
@@ -508,7 +504,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 
        fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
                        node_id, generation, device->max_speed, offset,
-                       &orb->pointer, 8, complete_transaction, orb);
+                       &orb_pointer, 8, complete_transaction, orb);
 }
 
 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
@@ -830,8 +826,6 @@ static void sbp2_target_put(struct sbp2_target *tgt)
        kref_put(&tgt->kref, sbp2_release_target);
 }
 
-static struct workqueue_struct *sbp2_wq;
-
 /*
  * Always get the target's kref when scheduling work on one its units.
  * Each workqueue job is responsible to call sbp2_target_put() upon return.
@@ -839,7 +833,7 @@ static struct workqueue_struct *sbp2_wq;
 static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
 {
        sbp2_target_get(lu->tgt);
-       if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
+       if (!queue_delayed_work(fw_workqueue, &lu->work, delay))
                sbp2_target_put(lu->tgt);
 }
 
@@ -1398,7 +1392,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
        sbp2_unmap_scatterlist(device->card->device, orb);
 
        orb->cmd->result = result;
-       orb->done(orb->cmd);
+       orb->cmd->scsi_done(orb->cmd);
 }
 
 static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
@@ -1463,7 +1457,8 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
 
 /* SCSI stack integration */
 
-static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
+                                 struct scsi_cmnd *cmd)
 {
        struct sbp2_logical_unit *lu = cmd->device->hostdata;
        struct fw_device *device = target_device(lu->tgt);
@@ -1477,7 +1472,7 @@ static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done
        if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
                fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
                cmd->result = DID_ERROR << 16;
-               done(cmd);
+               cmd->scsi_done(cmd);
                return 0;
        }
 
@@ -1490,11 +1485,8 @@ static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done
        /* Initialize rcode to something not RCODE_COMPLETE. */
        orb->base.rcode = -1;
        kref_init(&orb->base.kref);
-
-       orb->lu   = lu;
-       orb->done = done;
-       orb->cmd  = cmd;
-
+       orb->lu = lu;
+       orb->cmd = cmd;
        orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
        orb->request.misc = cpu_to_be32(
                COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
@@ -1529,8 +1521,6 @@ static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done
        return retval;
 }
 
-static DEF_SCSI_QCMD(sbp2_scsi_queuecommand)
-
 static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
 {
        struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1653,17 +1643,12 @@ MODULE_ALIAS("sbp2");
 
 static int __init sbp2_init(void)
 {
-       sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
-       if (!sbp2_wq)
-               return -ENOMEM;
-
        return driver_register(&sbp2_driver.driver);
 }
 
 static void __exit sbp2_cleanup(void)
 {
        driver_unregister(&sbp2_driver.driver);
-       destroy_workqueue(sbp2_wq);
 }
 
 module_init(sbp2_init);
index c26c11905ffe0063eec29b07a236b5e24a0922c9..2af8cb460a3bc7cfd33531c0304d6ca6cff56d90 100644 (file)
@@ -416,21 +416,21 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
 
        out_obj = output.pointer;
        if (out_obj->type != ACPI_TYPE_BUFFER) {
-               kfree(output.pointer);
                DEBPRINT("Run _GTM: error: "
                       "expected object type of ACPI_TYPE_BUFFER, "
                       "got 0x%x\n", out_obj->type);
+               kfree(output.pointer);
                return;
        }
 
        if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
            out_obj->buffer.length != sizeof(struct GTM_buffer)) {
-               kfree(output.pointer);
                printk(KERN_ERR
                        "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
                        "addr (0x%p)\n",
                        __func__, out_obj->buffer.length,
                        sizeof(struct GTM_buffer), out_obj->buffer.pointer);
+               kfree(output.pointer);
                return;
        }
 
index 5a702d02c848a172dc031a2ac53adca6882b6510..61fdf544fbd63193926010ff33fef34f9d30dbe9 100644 (file)
@@ -73,7 +73,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
                drive->failed_pc = NULL;
 
        if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
-           (rq && rq->cmd_type == REQ_TYPE_BLOCK_PC))
+           rq->cmd_type == REQ_TYPE_BLOCK_PC)
                uptodate = 1; /* FIXME */
        else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
 
index 0e79efff1debdd5c357c658b77b0ec1a60e735e2..c3da53e7bb2b481735569832bfe7928b66f26756 100644 (file)
@@ -88,7 +88,7 @@ static int __init ide_scan_pcibus(void)
        struct list_head *l, *n;
 
        pre_init = 0;
-       while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
+       for_each_pci_dev(dev)
                ide_scan_pcidev(dev);
 
        /*
index ebcf8e470a97b2685a1bf9b63b2e16c7a0b3b581..1db7c4368dbf6e3ab8c9b8269c896ee8e03c03d7 100644 (file)
@@ -1334,7 +1334,7 @@ out_free_pmif:
 static int
 pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
-       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
+       pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
        int rc = 0;
 
        if (mesg.event != pdev->dev.power.power_state.event
@@ -1350,7 +1350,7 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 static int
 pmac_ide_pci_resume(struct pci_dev *pdev)
 {
-       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
+       pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
        int rc = 0;
 
        if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
index dc85d777578ed087979b94829c886919c03c008a..0cfc455630d0a4319de151ff2f9b49c13cefec8d 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
index 5c9362792f1d928d71ded12a61b919c9ee25e7be..70bd738b8b99eff89185b8dee59f5a4f6599e4d3 100644 (file)
@@ -493,11 +493,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
        spin_unlock_irqrestore(&bitmap->lock, flags);
        sb = kmap_atomic(bitmap->sb_page, KM_USER0);
        sb->events = cpu_to_le64(bitmap->mddev->events);
-       if (bitmap->mddev->events < bitmap->events_cleared) {
+       if (bitmap->mddev->events < bitmap->events_cleared)
                /* rocking back to read-only */
                bitmap->events_cleared = bitmap->mddev->events;
-               sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
-       }
+       sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
+       sb->state = cpu_to_le32(bitmap->flags);
        /* Just in case these have been changed via sysfs: */
        sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
        sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
@@ -618,7 +618,7 @@ success:
        if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
                bitmap->flags |= BITMAP_HOSTENDIAN;
        bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
-       if (sb->state & cpu_to_le32(BITMAP_STALE))
+       if (bitmap->flags & BITMAP_STALE)
                bitmap->events_cleared = bitmap->mddev->events;
        err = 0;
 out:
@@ -652,9 +652,11 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
        switch (op) {
        case MASK_SET:
                sb->state |= cpu_to_le32(bits);
+               bitmap->flags |= bits;
                break;
        case MASK_UNSET:
                sb->state &= cpu_to_le32(~bits);
+               bitmap->flags &= ~bits;
                break;
        default:
                BUG();
index 7d6f7f18a92052e3258ffeae719c377b2df4fb78..aa640a85bb2169aa14fb8d9875beb0ac9091192c 100644 (file)
@@ -3324,7 +3324,7 @@ resync_start_store(mddev_t *mddev, const char *buf, size_t len)
        char *e;
        unsigned long long n = simple_strtoull(buf, &e, 10);
 
-       if (mddev->pers)
+       if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
                return -EBUSY;
        if (cmd_match(buf, "none"))
                n = MaxSector;
@@ -4347,13 +4347,19 @@ static int md_alloc(dev_t dev, char *name)
        disk->fops = &md_fops;
        disk->private_data = mddev;
        disk->queue = mddev->queue;
+       blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
        /* Allow extended partitions.  This makes the
         * 'mdp' device redundant, but we can't really
         * remove it now.
         */
        disk->flags |= GENHD_FL_EXT_DEVT;
-       add_disk(disk);
        mddev->gendisk = disk;
+       /* As soon as we call add_disk(), another thread could get
+        * through to md_open, so make sure it doesn't get too far
+        */
+       mutex_lock(&mddev->open_mutex);
+       add_disk(disk);
+
        error = kobject_init_and_add(&mddev->kobj, &md_ktype,
                                     &disk_to_dev(disk)->kobj, "%s", "md");
        if (error) {
@@ -4367,8 +4373,7 @@ static int md_alloc(dev_t dev, char *name)
        if (mddev->kobj.sd &&
            sysfs_create_group(&mddev->kobj, &md_bitmap_group))
                printk(KERN_DEBUG "pointless warning\n");
-
-       blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
+       mutex_unlock(&mddev->open_mutex);
  abort:
        mutex_unlock(&disks_mutex);
        if (!error && mddev->kobj.sd) {
@@ -5211,6 +5216,16 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                } else
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
+               if ((info->state & (1<<MD_DISK_SYNC)) &&
+                   (!test_bit(In_sync, &rdev->flags) ||
+                    rdev->raid_disk != info->raid_disk)) {
+                       /* This was a hot-add request, but events doesn't
+                        * match, so reject it.
+                        */
+                       export_rdev(rdev);
+                       return -EINVAL;
+               }
+
                if (test_bit(In_sync, &rdev->flags))
                        rdev->saved_raid_disk = rdev->raid_disk;
                else
index c35890990985632646483a97e849277023850b1a..3535c23af288e8eaac45042eb2e1909af8f0759a 100644 (file)
@@ -146,7 +146,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
        int i;
        
        seq_printf (seq, " [%d/%d] [", conf->raid_disks,
-                                                conf->working_disks);
+                   conf->raid_disks - mddev->degraded);
        for (i = 0; i < conf->raid_disks; i++)
                seq_printf (seq, "%s",
                               conf->multipaths[i].rdev && 
@@ -186,35 +186,36 @@ static int multipath_congested(void *data, int bits)
 static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
 {
        multipath_conf_t *conf = mddev->private;
+       char b[BDEVNAME_SIZE];
 
-       if (conf->working_disks <= 1) {
+       if (conf->raid_disks - mddev->degraded <= 1) {
                /*
                 * Uh oh, we can do nothing if this is our last path, but
                 * first check if this is a queued request for a device
                 * which has just failed.
                 */
                printk(KERN_ALERT 
-                       "multipath: only one IO path left and IO error.\n");
+                      "multipath: only one IO path left and IO error.\n");
                /* leave it active... it's all we have */
-       } else {
-               /*
-                * Mark disk as unusable
-                */
-               if (!test_bit(Faulty, &rdev->flags)) {
-                       char b[BDEVNAME_SIZE];
-                       clear_bit(In_sync, &rdev->flags);
-                       set_bit(Faulty, &rdev->flags);
-                       set_bit(MD_CHANGE_DEVS, &mddev->flags);
-                       conf->working_disks--;
-                       mddev->degraded++;
-                       printk(KERN_ALERT "multipath: IO failure on %s,"
-                               " disabling IO path.\n"
-                               "multipath: Operation continuing"
-                               " on %d IO paths.\n",
-                               bdevname (rdev->bdev,b),
-                               conf->working_disks);
-               }
+               return;
+       }
+       /*
+        * Mark disk as unusable
+        */
+       if (test_and_clear_bit(In_sync, &rdev->flags)) {
+               unsigned long flags;
+               spin_lock_irqsave(&conf->device_lock, flags);
+               mddev->degraded++;
+               spin_unlock_irqrestore(&conf->device_lock, flags);
        }
+       set_bit(Faulty, &rdev->flags);
+       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       printk(KERN_ALERT "multipath: IO failure on %s,"
+              " disabling IO path.\n"
+              "multipath: Operation continuing"
+              " on %d IO paths.\n",
+              bdevname(rdev->bdev, b),
+              conf->raid_disks - mddev->degraded);
 }
 
 static void print_multipath_conf (multipath_conf_t *conf)
@@ -227,7 +228,7 @@ static void print_multipath_conf (multipath_conf_t *conf)
                printk("(conf==NULL)\n");
                return;
        }
-       printk(" --- wd:%d rd:%d\n", conf->working_disks,
+       printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
                         conf->raid_disks);
 
        for (i = 0; i < conf->raid_disks; i++) {
@@ -274,10 +275,11 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                                                           PAGE_CACHE_SIZE - 1);
                        }
 
-                       conf->working_disks++;
+                       spin_lock_irq(&conf->device_lock);
                        mddev->degraded--;
                        rdev->raid_disk = path;
                        set_bit(In_sync, &rdev->flags);
+                       spin_unlock_irq(&conf->device_lock);
                        rcu_assign_pointer(p->rdev, rdev);
                        err = 0;
                        md_integrity_add_rdev(rdev, mddev);
@@ -391,6 +393,7 @@ static int multipath_run (mddev_t *mddev)
        int disk_idx;
        struct multipath_info *disk;
        mdk_rdev_t *rdev;
+       int working_disks;
 
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
@@ -424,7 +427,7 @@ static int multipath_run (mddev_t *mddev)
                goto out_free_conf;
        }
 
-       conf->working_disks = 0;
+       working_disks = 0;
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                disk_idx = rdev->raid_disk;
                if (disk_idx < 0 ||
@@ -446,7 +449,7 @@ static int multipath_run (mddev_t *mddev)
                }
 
                if (!test_bit(Faulty, &rdev->flags))
-                       conf->working_disks++;
+                       working_disks++;
        }
 
        conf->raid_disks = mddev->raid_disks;
@@ -454,12 +457,12 @@ static int multipath_run (mddev_t *mddev)
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);
 
-       if (!conf->working_disks) {
+       if (!working_disks) {
                printk(KERN_ERR "multipath: no operational IO paths for %s\n",
                        mdname(mddev));
                goto out_free_conf;
        }
-       mddev->degraded = conf->raid_disks - conf->working_disks;
+       mddev->degraded = conf->raid_disks - working_disks;
 
        conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
                                                 sizeof(struct multipath_bh));
@@ -481,7 +484,8 @@ static int multipath_run (mddev_t *mddev)
 
        printk(KERN_INFO 
                "multipath: array %s active with %d out of %d IO paths\n",
-               mdname(mddev), conf->working_disks, mddev->raid_disks);
+               mdname(mddev), conf->raid_disks - mddev->degraded,
+              mddev->raid_disks);
        /*
         * Ok, everything is just fine now
         */
index d1c2a8d7839553f28e79b04ec2f585ac8634c815..3c5a45eb5f8a14ef8e8caf372470a73ce0dc7700 100644 (file)
@@ -9,7 +9,6 @@ struct multipath_private_data {
        mddev_t                 *mddev;
        struct multipath_info   *multipaths;
        int                     raid_disks;
-       int                     working_disks;
        spinlock_t              device_lock;
        struct list_head        retry_list;
 
index 2b7a7ff401dc6fba50047bbc3f0fb1980055da13..5d096096f9584972a9da2197ed1bef9303edf95f 100644 (file)
@@ -297,23 +297,24 @@ static void raid1_end_read_request(struct bio *bio, int error)
        rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 }
 
-static void r1_bio_write_done(r1bio_t *r1_bio, int vcnt, struct bio_vec *bv,
-                             int behind)
+static void r1_bio_write_done(r1bio_t *r1_bio)
 {
        if (atomic_dec_and_test(&r1_bio->remaining))
        {
                /* it really is the end of this request */
                if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
                        /* free extra copy of the data pages */
-                       int i = vcnt;
+                       int i = r1_bio->behind_page_count;
                        while (i--)
-                               safe_put_page(bv[i].bv_page);
+                               safe_put_page(r1_bio->behind_pages[i]);
+                       kfree(r1_bio->behind_pages);
+                       r1_bio->behind_pages = NULL;
                }
                /* clear the bitmap if all writes complete successfully */
                bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
                                r1_bio->sectors,
                                !test_bit(R1BIO_Degraded, &r1_bio->state),
-                               behind);
+                               test_bit(R1BIO_BehindIO, &r1_bio->state));
                md_write_end(r1_bio->mddev);
                raid_end_bio_io(r1_bio);
        }
@@ -386,7 +387,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
         * Let's see if all mirrored write operations have finished
         * already.
         */
-       r1_bio_write_done(r1_bio, bio->bi_vcnt, bio->bi_io_vec, behind);
+       r1_bio_write_done(r1_bio);
 
        if (to_put)
                bio_put(to_put);
@@ -411,10 +412,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 {
        const sector_t this_sector = r1_bio->sector;
        const int sectors = r1_bio->sectors;
-       int new_disk = -1;
        int start_disk;
+       int best_disk;
        int i;
-       sector_t new_distance, current_distance;
+       sector_t best_dist;
        mdk_rdev_t *rdev;
        int choose_first;
 
@@ -425,6 +426,8 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
         * We take the first readable disk when above the resync window.
         */
  retry:
+       best_disk = -1;
+       best_dist = MaxSector;
        if (conf->mddev->recovery_cp < MaxSector &&
            (this_sector + sectors >= conf->next_resync)) {
                choose_first = 1;
@@ -434,8 +437,8 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
                start_disk = conf->last_used;
        }
 
-       /* make sure the disk is operational */
        for (i = 0 ; i < conf->raid_disks ; i++) {
+               sector_t dist;
                int disk = start_disk + i;
                if (disk >= conf->raid_disks)
                        disk -= conf->raid_disks;
@@ -443,60 +446,43 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
                    || rdev == NULL
-                   || !test_bit(In_sync, &rdev->flags))
+                   || test_bit(Faulty, &rdev->flags))
                        continue;
-
-               new_disk = disk;
-               if (!test_bit(WriteMostly, &rdev->flags))
-                       break;
-       }
-
-       if (new_disk < 0 || choose_first)
-               goto rb_out;
-
-       /*
-        * Don't change to another disk for sequential reads:
-        */
-       if (conf->next_seq_sect == this_sector)
-               goto rb_out;
-       if (this_sector == conf->mirrors[new_disk].head_position)
-               goto rb_out;
-
-       current_distance = abs(this_sector 
-                              - conf->mirrors[new_disk].head_position);
-
-       /* look for a better disk - i.e. head is closer */
-       start_disk = new_disk;
-       for (i = 1; i < conf->raid_disks; i++) {
-               int disk = start_disk + 1;
-               if (disk >= conf->raid_disks)
-                       disk -= conf->raid_disks;
-
-               rdev = rcu_dereference(conf->mirrors[disk].rdev);
-               if (r1_bio->bios[disk] == IO_BLOCKED
-                   || rdev == NULL
-                   || !test_bit(In_sync, &rdev->flags)
-                   || test_bit(WriteMostly, &rdev->flags))
+               if (!test_bit(In_sync, &rdev->flags) &&
+                   rdev->recovery_offset < this_sector + sectors)
                        continue;
-
-               if (!atomic_read(&rdev->nr_pending)) {
-                       new_disk = disk;
+               if (test_bit(WriteMostly, &rdev->flags)) {
+                       /* Don't balance among write-mostly, just
+                        * use the first as a last resort */
+                       if (best_disk < 0)
+                               best_disk = disk;
+                       continue;
+               }
+               /* This is a reasonable device to use.  It might
+                * even be best.
+                */
+               dist = abs(this_sector - conf->mirrors[disk].head_position);
+               if (choose_first
+                   /* Don't change to another disk for sequential reads */
+                   || conf->next_seq_sect == this_sector
+                   || dist == 0
+                   /* If device is idle, use it */
+                   || atomic_read(&rdev->nr_pending) == 0) {
+                       best_disk = disk;
                        break;
                }
-               new_distance = abs(this_sector - conf->mirrors[disk].head_position);
-               if (new_distance < current_distance) {
-                       current_distance = new_distance;
-                       new_disk = disk;
+               if (dist < best_dist) {
+                       best_dist = dist;
+                       best_disk = disk;
                }
        }
 
- rb_out:
-       if (new_disk >= 0) {
-               rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+       if (best_disk >= 0) {
+               rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
                if (!rdev)
                        goto retry;
                atomic_inc(&rdev->nr_pending);
-               if (!test_bit(In_sync, &rdev->flags)) {
+               if (test_bit(Faulty, &rdev->flags)) {
                        /* cannot risk returning a device that failed
                         * before we inc'ed nr_pending
                         */
@@ -504,11 +490,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
                        goto retry;
                }
                conf->next_seq_sect = this_sector + sectors;
-               conf->last_used = new_disk;
+               conf->last_used = best_disk;
        }
        rcu_read_unlock();
 
-       return new_disk;
+       return best_disk;
 }
 
 static int raid1_congested(void *data, int bits)
@@ -675,37 +661,36 @@ static void unfreeze_array(conf_t *conf)
 
 
 /* duplicate the data pages for behind I/O 
- * We return a list of bio_vec rather than just page pointers
- * as it makes freeing easier
  */
-static struct bio_vec *alloc_behind_pages(struct bio *bio)
+static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
 {
        int i;
        struct bio_vec *bvec;
-       struct bio_vec *pages = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
+       struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*),
                                        GFP_NOIO);
        if (unlikely(!pages))
-               goto do_sync_io;
+               return;
 
        bio_for_each_segment(bvec, bio, i) {
-               pages[i].bv_page = alloc_page(GFP_NOIO);
-               if (unlikely(!pages[i].bv_page))
+               pages[i] = alloc_page(GFP_NOIO);
+               if (unlikely(!pages[i]))
                        goto do_sync_io;
-               memcpy(kmap(pages[i].bv_page) + bvec->bv_offset,
+               memcpy(kmap(pages[i]) + bvec->bv_offset,
                        kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
-               kunmap(pages[i].bv_page);
+               kunmap(pages[i]);
                kunmap(bvec->bv_page);
        }
-
-       return pages;
+       r1_bio->behind_pages = pages;
+       r1_bio->behind_page_count = bio->bi_vcnt;
+       set_bit(R1BIO_BehindIO, &r1_bio->state);
+       return;
 
 do_sync_io:
-       if (pages)
-               for (i = 0; i < bio->bi_vcnt && pages[i].bv_page; i++)
-                       put_page(pages[i].bv_page);
+       for (i = 0; i < bio->bi_vcnt; i++)
+               if (pages[i])
+                       put_page(pages[i]);
        kfree(pages);
        PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
-       return NULL;
 }
 
 static int make_request(mddev_t *mddev, struct bio * bio)
@@ -717,7 +702,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        int i, targets = 0, disks;
        struct bitmap *bitmap;
        unsigned long flags;
-       struct bio_vec *behind_pages = NULL;
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
@@ -870,9 +854,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        if (bitmap &&
            (atomic_read(&bitmap->behind_writes)
             < mddev->bitmap_info.max_write_behind) &&
-           !waitqueue_active(&bitmap->behind_wait) &&
-           (behind_pages = alloc_behind_pages(bio)) != NULL)
-               set_bit(R1BIO_BehindIO, &r1_bio->state);
+           !waitqueue_active(&bitmap->behind_wait))
+               alloc_behind_pages(bio, r1_bio);
 
        atomic_set(&r1_bio->remaining, 1);
        atomic_set(&r1_bio->behind_remaining, 0);
@@ -893,7 +876,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                mbio->bi_rw = WRITE | do_flush_fua | do_sync;
                mbio->bi_private = r1_bio;
 
-               if (behind_pages) {
+               if (r1_bio->behind_pages) {
                        struct bio_vec *bvec;
                        int j;
 
@@ -905,7 +888,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                         * them all
                         */
                        __bio_for_each_segment(bvec, mbio, j, 0)
-                               bvec->bv_page = behind_pages[j].bv_page;
+                               bvec->bv_page = r1_bio->behind_pages[j];
                        if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
                                atomic_inc(&r1_bio->behind_remaining);
                }
@@ -915,8 +898,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                bio_list_add(&conf->pending_bio_list, mbio);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
-       r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
-       kfree(behind_pages); /* the behind pages are attached to the bios now */
+       r1_bio_write_done(r1_bio);
 
        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
@@ -1196,194 +1178,210 @@ static void end_sync_write(struct bio *bio, int error)
        }
 }
 
-static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
+static int fix_sync_read_error(r1bio_t *r1_bio)
 {
+       /* Try some synchronous reads of other devices to get
+        * good data, much like with normal read errors.  Only
+        * read into the pages we already have so we don't
+        * need to re-issue the read request.
+        * We don't need to freeze the array, because being in an
+        * active sync request, there is no normal IO, and
+        * no overlapping syncs.
+        */
+       mddev_t *mddev = r1_bio->mddev;
        conf_t *conf = mddev->private;
-       int i;
-       int disks = conf->raid_disks;
-       struct bio *bio, *wbio;
-
-       bio = r1_bio->bios[r1_bio->read_disk];
+       struct bio *bio = r1_bio->bios[r1_bio->read_disk];
+       sector_t sect = r1_bio->sector;
+       int sectors = r1_bio->sectors;
+       int idx = 0;
 
+       while(sectors) {
+               int s = sectors;
+               int d = r1_bio->read_disk;
+               int success = 0;
+               mdk_rdev_t *rdev;
+               int start;
 
-       if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
-               /* We have read all readable devices.  If we haven't
-                * got the block, then there is no hope left.
-                * If we have, then we want to do a comparison
-                * and skip the write if everything is the same.
-                * If any blocks failed to read, then we need to
-                * attempt an over-write
-                */
-               int primary;
-               if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
-                       for (i=0; i<mddev->raid_disks; i++)
-                               if (r1_bio->bios[i]->bi_end_io == end_sync_read)
-                                       md_error(mddev, conf->mirrors[i].rdev);
+               if (s > (PAGE_SIZE>>9))
+                       s = PAGE_SIZE >> 9;
+               do {
+                       if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
+                               /* No rcu protection needed here devices
+                                * can only be removed when no resync is
+                                * active, and resync is currently active
+                                */
+                               rdev = conf->mirrors[d].rdev;
+                               if (sync_page_io(rdev,
+                                                sect,
+                                                s<<9,
+                                                bio->bi_io_vec[idx].bv_page,
+                                                READ, false)) {
+                                       success = 1;
+                                       break;
+                               }
+                       }
+                       d++;
+                       if (d == conf->raid_disks)
+                               d = 0;
+               } while (!success && d != r1_bio->read_disk);
 
-                       md_done_sync(mddev, r1_bio->sectors, 1);
+               if (!success) {
+                       char b[BDEVNAME_SIZE];
+                       /* Cannot read from anywhere, array is toast */
+                       md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+                       printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
+                              " for block %llu\n",
+                              mdname(mddev),
+                              bdevname(bio->bi_bdev, b),
+                              (unsigned long long)r1_bio->sector);
+                       md_done_sync(mddev, r1_bio->sectors, 0);
                        put_buf(r1_bio);
-                       return;
+                       return 0;
                }
-               for (primary=0; primary<mddev->raid_disks; primary++)
-                       if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
-                           test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
-                               r1_bio->bios[primary]->bi_end_io = NULL;
-                               rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
-                               break;
-                       }
-               r1_bio->read_disk = primary;
-               for (i=0; i<mddev->raid_disks; i++)
-                       if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
-                               int j;
-                               int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
-                               struct bio *pbio = r1_bio->bios[primary];
-                               struct bio *sbio = r1_bio->bios[i];
-
-                               if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
-                                       for (j = vcnt; j-- ; ) {
-                                               struct page *p, *s;
-                                               p = pbio->bi_io_vec[j].bv_page;
-                                               s = sbio->bi_io_vec[j].bv_page;
-                                               if (memcmp(page_address(p),
-                                                          page_address(s),
-                                                          PAGE_SIZE))
-                                                       break;
-                                       }
-                               } else
-                                       j = 0;
-                               if (j >= 0)
-                                       mddev->resync_mismatches += r1_bio->sectors;
-                               if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-                                             && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
-                                       sbio->bi_end_io = NULL;
-                                       rdev_dec_pending(conf->mirrors[i].rdev, mddev);
-                               } else {
-                                       /* fixup the bio for reuse */
-                                       int size;
-                                       sbio->bi_vcnt = vcnt;
-                                       sbio->bi_size = r1_bio->sectors << 9;
-                                       sbio->bi_idx = 0;
-                                       sbio->bi_phys_segments = 0;
-                                       sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
-                                       sbio->bi_flags |= 1 << BIO_UPTODATE;
-                                       sbio->bi_next = NULL;
-                                       sbio->bi_sector = r1_bio->sector +
-                                               conf->mirrors[i].rdev->data_offset;
-                                       sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
-                                       size = sbio->bi_size;
-                                       for (j = 0; j < vcnt ; j++) {
-                                               struct bio_vec *bi;
-                                               bi = &sbio->bi_io_vec[j];
-                                               bi->bv_offset = 0;
-                                               if (size > PAGE_SIZE)
-                                                       bi->bv_len = PAGE_SIZE;
-                                               else
-                                                       bi->bv_len = size;
-                                               size -= PAGE_SIZE;
-                                               memcpy(page_address(bi->bv_page),
-                                                      page_address(pbio->bi_io_vec[j].bv_page),
-                                                      PAGE_SIZE);
-                                       }
 
-                               }
-                       }
+               start = d;
+               /* write it back and re-read */
+               while (d != r1_bio->read_disk) {
+                       if (d == 0)
+                               d = conf->raid_disks;
+                       d--;
+                       if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+                               continue;
+                       rdev = conf->mirrors[d].rdev;
+                       if (sync_page_io(rdev,
+                                        sect,
+                                        s<<9,
+                                        bio->bi_io_vec[idx].bv_page,
+                                        WRITE, false) == 0) {
+                               r1_bio->bios[d]->bi_end_io = NULL;
+                               rdev_dec_pending(rdev, mddev);
+                               md_error(mddev, rdev);
+                       } else
+                               atomic_add(s, &rdev->corrected_errors);
+               }
+               d = start;
+               while (d != r1_bio->read_disk) {
+                       if (d == 0)
+                               d = conf->raid_disks;
+                       d--;
+                       if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+                               continue;
+                       rdev = conf->mirrors[d].rdev;
+                       if (sync_page_io(rdev,
+                                        sect,
+                                        s<<9,
+                                        bio->bi_io_vec[idx].bv_page,
+                                        READ, false) == 0)
+                               md_error(mddev, rdev);
+               }
+               sectors -= s;
+               sect += s;
+               idx ++;
        }
-       if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
-               /* ouch - failed to read all of that.
-                * Try some synchronous reads of other devices to get
-                * good data, much like with normal read errors.  Only
-                * read into the pages we already have so we don't
-                * need to re-issue the read request.
-                * We don't need to freeze the array, because being in an
-                * active sync request, there is no normal IO, and
-                * no overlapping syncs.
-                */
-               sector_t sect = r1_bio->sector;
-               int sectors = r1_bio->sectors;
-               int idx = 0;
-
-               while(sectors) {
-                       int s = sectors;
-                       int d = r1_bio->read_disk;
-                       int success = 0;
-                       mdk_rdev_t *rdev;
-
-                       if (s > (PAGE_SIZE>>9))
-                               s = PAGE_SIZE >> 9;
-                       do {
-                               if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
-                                       /* No rcu protection needed here devices
-                                        * can only be removed when no resync is
-                                        * active, and resync is currently active
-                                        */
-                                       rdev = conf->mirrors[d].rdev;
-                                       if (sync_page_io(rdev,
-                                                        sect,
-                                                        s<<9,
-                                                        bio->bi_io_vec[idx].bv_page,
-                                                        READ, false)) {
-                                               success = 1;
-                                               break;
-                                       }
-                               }
-                               d++;
-                               if (d == conf->raid_disks)
-                                       d = 0;
-                       } while (!success && d != r1_bio->read_disk);
-
-                       if (success) {
-                               int start = d;
-                               /* write it back and re-read */
-                               set_bit(R1BIO_Uptodate, &r1_bio->state);
-                               while (d != r1_bio->read_disk) {
-                                       if (d == 0)
-                                               d = conf->raid_disks;
-                                       d--;
-                                       if (r1_bio->bios[d]->bi_end_io != end_sync_read)
-                                               continue;
-                                       rdev = conf->mirrors[d].rdev;
-                                       atomic_add(s, &rdev->corrected_errors);
-                                       if (sync_page_io(rdev,
-                                                        sect,
-                                                        s<<9,
-                                                        bio->bi_io_vec[idx].bv_page,
-                                                        WRITE, false) == 0)
-                                               md_error(mddev, rdev);
-                               }
-                               d = start;
-                               while (d != r1_bio->read_disk) {
-                                       if (d == 0)
-                                               d = conf->raid_disks;
-                                       d--;
-                                       if (r1_bio->bios[d]->bi_end_io != end_sync_read)
-                                               continue;
-                                       rdev = conf->mirrors[d].rdev;
-                                       if (sync_page_io(rdev,
-                                                        sect,
-                                                        s<<9,
-                                                        bio->bi_io_vec[idx].bv_page,
-                                                        READ, false) == 0)
-                                               md_error(mddev, rdev);
-                               }
-                       } else {
-                               char b[BDEVNAME_SIZE];
-                               /* Cannot read from anywhere, array is toast */
-                               md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
-                               printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
-                                      " for block %llu\n",
-                                      mdname(mddev),
-                                      bdevname(bio->bi_bdev, b),
-                                      (unsigned long long)r1_bio->sector);
-                               md_done_sync(mddev, r1_bio->sectors, 0);
-                               put_buf(r1_bio);
-                               return;
+       set_bit(R1BIO_Uptodate, &r1_bio->state);
+       set_bit(BIO_UPTODATE, &bio->bi_flags);
+       return 1;
+}
+
+static int process_checks(r1bio_t *r1_bio)
+{
+       /* We have read all readable devices.  If we haven't
+        * got the block, then there is no hope left.
+        * If we have, then we want to do a comparison
+        * and skip the write if everything is the same.
+        * If any blocks failed to read, then we need to
+        * attempt an over-write
+        */
+       mddev_t *mddev = r1_bio->mddev;
+       conf_t *conf = mddev->private;
+       int primary;
+       int i;
+
+       for (primary = 0; primary < conf->raid_disks; primary++)
+               if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
+                   test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
+                       r1_bio->bios[primary]->bi_end_io = NULL;
+                       rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
+                       break;
+               }
+       r1_bio->read_disk = primary;
+       for (i = 0; i < conf->raid_disks; i++) {
+               int j;
+               int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
+               struct bio *pbio = r1_bio->bios[primary];
+               struct bio *sbio = r1_bio->bios[i];
+               int size;
+
+               if (r1_bio->bios[i]->bi_end_io != end_sync_read)
+                       continue;
+
+               if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+                       for (j = vcnt; j-- ; ) {
+                               struct page *p, *s;
+                               p = pbio->bi_io_vec[j].bv_page;
+                               s = sbio->bi_io_vec[j].bv_page;
+                               if (memcmp(page_address(p),
+                                          page_address(s),
+                                          PAGE_SIZE))
+                                       break;
                        }
-                       sectors -= s;
-                       sect += s;
-                       idx ++;
+               } else
+                       j = 0;
+               if (j >= 0)
+                       mddev->resync_mismatches += r1_bio->sectors;
+               if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
+                             && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+                       /* No need to write to this device. */
+                       sbio->bi_end_io = NULL;
+                       rdev_dec_pending(conf->mirrors[i].rdev, mddev);
+                       continue;
+               }
+               /* fixup the bio for reuse */
+               sbio->bi_vcnt = vcnt;
+               sbio->bi_size = r1_bio->sectors << 9;
+               sbio->bi_idx = 0;
+               sbio->bi_phys_segments = 0;
+               sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
+               sbio->bi_flags |= 1 << BIO_UPTODATE;
+               sbio->bi_next = NULL;
+               sbio->bi_sector = r1_bio->sector +
+                       conf->mirrors[i].rdev->data_offset;
+               sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+               size = sbio->bi_size;
+               for (j = 0; j < vcnt ; j++) {
+                       struct bio_vec *bi;
+                       bi = &sbio->bi_io_vec[j];
+                       bi->bv_offset = 0;
+                       if (size > PAGE_SIZE)
+                               bi->bv_len = PAGE_SIZE;
+                       else
+                               bi->bv_len = size;
+                       size -= PAGE_SIZE;
+                       memcpy(page_address(bi->bv_page),
+                              page_address(pbio->bi_io_vec[j].bv_page),
+                              PAGE_SIZE);
                }
        }
+       return 0;
+}
 
+static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
+{
+       conf_t *conf = mddev->private;
+       int i;
+       int disks = conf->raid_disks;
+       struct bio *bio, *wbio;
+
+       bio = r1_bio->bios[r1_bio->read_disk];
+
+       if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
+               /* ouch - failed to read all of that. */
+               if (!fix_sync_read_error(r1_bio))
+                       return;
+
+       if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+               if (process_checks(r1_bio) < 0)
+                       return;
        /*
         * schedule writes
         */
@@ -2063,7 +2061,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
        if (sectors > mddev->dev_sectors &&
-           mddev->recovery_cp == MaxSector) {
+           mddev->recovery_cp > mddev->dev_sectors) {
                mddev->recovery_cp = mddev->dev_sectors;
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        }
index cbfdf1a6acd98e6f501d29912c2bc718c39450df..5fc4ca1af8639b5a61a6a17901866fe0987effcc 100644 (file)
@@ -94,7 +94,9 @@ struct r1bio_s {
        int                     read_disk;
 
        struct list_head        retry_list;
-       struct bitmap_update    *bitmap_update;
+       /* Next two are only valid when R1BIO_BehindIO is set */
+       struct page             **behind_pages;
+       int                     behind_page_count;
        /*
         * if the IO is in WRITE direction, then multiple bios are used.
         * We choose the number when they are allocated.
index 8e9462626ec5cb8d7441b46ddae9a848772153fe..6e846688962fccfd9c7ceddd645192047f7d4d87 100644 (file)
@@ -271,9 +271,10 @@ static void raid10_end_read_request(struct bio *bio, int error)
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
                raid_end_bio_io(r10_bio);
+               rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
        } else {
                /*
-                * oops, read error:
+                * oops, read error - keep the refcount on the rdev
                 */
                char b[BDEVNAME_SIZE];
                if (printk_ratelimit())
@@ -282,8 +283,6 @@ static void raid10_end_read_request(struct bio *bio, int error)
                               bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
                reschedule_retry(r10_bio);
        }
-
-       rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
 
 static void raid10_end_write_request(struct bio *bio, int error)
@@ -488,13 +487,19 @@ static int raid10_mergeable_bvec(struct request_queue *q,
 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 {
        const sector_t this_sector = r10_bio->sector;
-       int disk, slot, nslot;
+       int disk, slot;
        const int sectors = r10_bio->sectors;
-       sector_t new_distance, current_distance;
+       sector_t new_distance, best_dist;
        mdk_rdev_t *rdev;
+       int do_balance;
+       int best_slot;
 
        raid10_find_phys(conf, r10_bio);
        rcu_read_lock();
+retry:
+       best_slot = -1;
+       best_dist = MaxSector;
+       do_balance = 1;
        /*
         * Check if we can balance. We can balance on the whole
         * device if no resync is going on (recovery is ok), or below
@@ -502,86 +507,58 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
         * above the resync window.
         */
        if (conf->mddev->recovery_cp < MaxSector
-           && (this_sector + sectors >= conf->next_resync)) {
-               /* make sure that disk is operational */
-               slot = 0;
-               disk = r10_bio->devs[slot].devnum;
-
-               while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
-                      r10_bio->devs[slot].bio == IO_BLOCKED ||
-                      !test_bit(In_sync, &rdev->flags)) {
-                       slot++;
-                       if (slot == conf->copies) {
-                               slot = 0;
-                               disk = -1;
-                               break;
-                       }
-                       disk = r10_bio->devs[slot].devnum;
-               }
-               goto rb_out;
-       }
-
+           && (this_sector + sectors >= conf->next_resync))
+               do_balance = 0;
 
-       /* make sure the disk is operational */
-       slot = 0;
-       disk = r10_bio->devs[slot].devnum;
-       while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
-              r10_bio->devs[slot].bio == IO_BLOCKED ||
-              !test_bit(In_sync, &rdev->flags)) {
-               slot ++;
-               if (slot == conf->copies) {
-                       disk = -1;
-                       goto rb_out;
-               }
+       for (slot = 0; slot < conf->copies ; slot++) {
+               if (r10_bio->devs[slot].bio == IO_BLOCKED)
+                       continue;
                disk = r10_bio->devs[slot].devnum;
-       }
-
-
-       current_distance = abs(r10_bio->devs[slot].addr -
-                              conf->mirrors[disk].head_position);
-
-       /* Find the disk whose head is closest,
-        * or - for far > 1 - find the closest to partition beginning */
-
-       for (nslot = slot; nslot < conf->copies; nslot++) {
-               int ndisk = r10_bio->devs[nslot].devnum;
-
-
-               if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
-                   r10_bio->devs[nslot].bio == IO_BLOCKED ||
-                   !test_bit(In_sync, &rdev->flags))
+               rdev = rcu_dereference(conf->mirrors[disk].rdev);
+               if (rdev == NULL)
                        continue;
+               if (!test_bit(In_sync, &rdev->flags))
+                       continue;
+
+               if (!do_balance)
+                       break;
 
                /* This optimisation is debatable, and completely destroys
                 * sequential read speed for 'far copies' arrays.  So only
                 * keep it for 'near' arrays, and review those later.
                 */
-               if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
-                       disk = ndisk;
-                       slot = nslot;
+               if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
                        break;
-               }
 
                /* for far > 1 always use the lowest address */
                if (conf->far_copies > 1)
-                       new_distance = r10_bio->devs[nslot].addr;
+                       new_distance = r10_bio->devs[slot].addr;
                else
-                       new_distance = abs(r10_bio->devs[nslot].addr -
-                                          conf->mirrors[ndisk].head_position);
-               if (new_distance < current_distance) {
-                       current_distance = new_distance;
-                       disk = ndisk;
-                       slot = nslot;
+                       new_distance = abs(r10_bio->devs[slot].addr -
+                                          conf->mirrors[disk].head_position);
+               if (new_distance < best_dist) {
+                       best_dist = new_distance;
+                       best_slot = slot;
                }
        }
+       if (slot == conf->copies)
+               slot = best_slot;
 
-rb_out:
-       r10_bio->read_slot = slot;
-/*     conf->next_seq_sect = this_sector + sectors;*/
-
-       if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
-               atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
-       else
+       if (slot >= 0) {
+               disk = r10_bio->devs[slot].devnum;
+               rdev = rcu_dereference(conf->mirrors[disk].rdev);
+               if (!rdev)
+                       goto retry;
+               atomic_inc(&rdev->nr_pending);
+               if (test_bit(Faulty, &rdev->flags)) {
+                       /* Cannot risk returning a device that failed
+                        * before we inc'ed nr_pending
+                        */
+                       rdev_dec_pending(rdev, conf->mddev);
+                       goto retry;
+               }
+               r10_bio->read_slot = slot;
+       } else
                disk = -1;
        rcu_read_unlock();
 
@@ -1460,40 +1437,33 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
        int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
        int d = r10_bio->devs[r10_bio->read_slot].devnum;
 
-       rcu_read_lock();
-       rdev = rcu_dereference(conf->mirrors[d].rdev);
-       if (rdev) { /* If rdev is not NULL */
-               char b[BDEVNAME_SIZE];
-               int cur_read_error_count = 0;
+       /* still own a reference to this rdev, so it cannot
+        * have been cleared recently.
+        */
+       rdev = conf->mirrors[d].rdev;
 
-               bdevname(rdev->bdev, b);
+       if (test_bit(Faulty, &rdev->flags))
+               /* drive has already been failed, just ignore any
+                  more fix_read_error() attempts */
+               return;
 
-               if (test_bit(Faulty, &rdev->flags)) {
-                       rcu_read_unlock();
-                       /* drive has already been failed, just ignore any
-                          more fix_read_error() attempts */
-                       return;
-               }
+       check_decay_read_errors(mddev, rdev);
+       atomic_inc(&rdev->read_errors);
+       if (atomic_read(&rdev->read_errors) > max_read_errors) {
+               char b[BDEVNAME_SIZE];
+               bdevname(rdev->bdev, b);
 
-               check_decay_read_errors(mddev, rdev);
-               atomic_inc(&rdev->read_errors);
-               cur_read_error_count = atomic_read(&rdev->read_errors);
-               if (cur_read_error_count > max_read_errors) {
-                       rcu_read_unlock();
-                       printk(KERN_NOTICE
-                              "md/raid10:%s: %s: Raid device exceeded "
-                              "read_error threshold "
-                              "[cur %d:max %d]\n",
-                              mdname(mddev),
-                              b, cur_read_error_count, max_read_errors);
-                       printk(KERN_NOTICE
-                              "md/raid10:%s: %s: Failing raid "
-                              "device\n", mdname(mddev), b);
-                       md_error(mddev, conf->mirrors[d].rdev);
-                       return;
-               }
+               printk(KERN_NOTICE
+                      "md/raid10:%s: %s: Raid device exceeded "
+                      "read_error threshold [cur %d:max %d]\n",
+                      mdname(mddev), b,
+                      atomic_read(&rdev->read_errors), max_read_errors);
+               printk(KERN_NOTICE
+                      "md/raid10:%s: %s: Failing raid device\n",
+                      mdname(mddev), b);
+               md_error(mddev, conf->mirrors[d].rdev);
+               return;
        }
-       rcu_read_unlock();
 
        while(sectors) {
                int s = sectors;
@@ -1562,8 +1532,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                               "write failed"
                                               " (%d sectors at %llu on %s)\n",
                                               mdname(mddev), s,
-                                              (unsigned long long)(sect+
-                                              rdev->data_offset),
+                                              (unsigned long long)(
+                                                      sect + rdev->data_offset),
                                               bdevname(rdev->bdev, b));
                                        printk(KERN_NOTICE "md/raid10:%s: %s: failing "
                                               "drive\n",
@@ -1599,8 +1569,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                               "corrected sectors"
                                               " (%d sectors at %llu on %s)\n",
                                               mdname(mddev), s,
-                                              (unsigned long long)(sect+
-                                                   rdev->data_offset),
+                                              (unsigned long long)(
+                                                      sect + rdev->data_offset),
                                               bdevname(rdev->bdev, b));
                                        printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
                                               mdname(mddev),
@@ -1612,8 +1582,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                               "md/raid10:%s: read error corrected"
                                               " (%d sectors at %llu on %s)\n",
                                               mdname(mddev), s,
-                                              (unsigned long long)(sect+
-                                                   rdev->data_offset),
+                                              (unsigned long long)(
+                                                      sect + rdev->data_offset),
                                               bdevname(rdev->bdev, b));
                                }
 
@@ -1663,7 +1633,8 @@ static void raid10d(mddev_t *mddev)
                else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
                        recovery_request_write(mddev, r10_bio);
                else {
-                       int mirror;
+                       int slot = r10_bio->read_slot;
+                       int mirror = r10_bio->devs[slot].devnum;
                        /* we got a read error. Maybe the drive is bad.  Maybe just
                         * the block and we can fix it.
                         * We freeze all other IO, and try reading the block from
@@ -1677,9 +1648,10 @@ static void raid10d(mddev_t *mddev)
                                fix_read_error(conf, mddev, r10_bio);
                                unfreeze_array(conf);
                        }
+                       rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
 
-                       bio = r10_bio->devs[r10_bio->read_slot].bio;
-                       r10_bio->devs[r10_bio->read_slot].bio =
+                       bio = r10_bio->devs[slot].bio;
+                       r10_bio->devs[slot].bio =
                                mddev->ro ? IO_BLOCKED : NULL;
                        mirror = read_balance(conf, r10_bio);
                        if (mirror == -1) {
@@ -1693,6 +1665,7 @@ static void raid10d(mddev_t *mddev)
                        } else {
                                const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
                                bio_put(bio);
+                               slot = r10_bio->read_slot;
                                rdev = conf->mirrors[mirror].rdev;
                                if (printk_ratelimit())
                                        printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
@@ -1702,8 +1675,8 @@ static void raid10d(mddev_t *mddev)
                                               (unsigned long long)r10_bio->sector);
                                bio = bio_clone_mddev(r10_bio->master_bio,
                                                      GFP_NOIO, mddev);
-                               r10_bio->devs[r10_bio->read_slot].bio = bio;
-                               bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
+                               r10_bio->devs[slot].bio = bio;
+                               bio->bi_sector = r10_bio->devs[slot].addr
                                        + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                bio->bi_rw = READ | do_sync;
@@ -1763,13 +1736,13 @@ static int init_resync(conf_t *conf)
  *
  */
 
-static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
+                            int *skipped, int go_faster)
 {
        conf_t *conf = mddev->private;
        r10bio_t *r10_bio;
        struct bio *biolist = NULL, *bio;
        sector_t max_sector, nr_sectors;
-       int disk;
        int i;
        int max_sync;
        sector_t sync_blocks;
@@ -1858,108 +1831,114 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                int j, k;
                r10_bio = NULL;
 
-               for (i=0 ; i<conf->raid_disks; i++)
-                       if (conf->mirrors[i].rdev &&
-                           !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
-                               int still_degraded = 0;
-                               /* want to reconstruct this device */
-                               r10bio_t *rb2 = r10_bio;
-                               sector_t sect = raid10_find_virt(conf, sector_nr, i);
-                               int must_sync;
-                               /* Unless we are doing a full sync, we only need
-                                * to recover the block if it is set in the bitmap
-                                */
-                               must_sync = bitmap_start_sync(mddev->bitmap, sect,
-                                                             &sync_blocks, 1);
-                               if (sync_blocks < max_sync)
-                                       max_sync = sync_blocks;
-                               if (!must_sync &&
-                                   !conf->fullsync) {
-                                       /* yep, skip the sync_blocks here, but don't assume
-                                        * that there will never be anything to do here
-                                        */
-                                       chunks_skipped = -1;
-                                       continue;
-                               }
+               for (i=0 ; i<conf->raid_disks; i++) {
+                       int still_degraded;
+                       r10bio_t *rb2;
+                       sector_t sect;
+                       int must_sync;
 
-                               r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
-                               raise_barrier(conf, rb2 != NULL);
-                               atomic_set(&r10_bio->remaining, 0);
+                       if (conf->mirrors[i].rdev == NULL ||
+                           test_bit(In_sync, &conf->mirrors[i].rdev->flags)) 
+                               continue;
 
-                               r10_bio->master_bio = (struct bio*)rb2;
-                               if (rb2)
-                                       atomic_inc(&rb2->remaining);
-                               r10_bio->mddev = mddev;
-                               set_bit(R10BIO_IsRecover, &r10_bio->state);
-                               r10_bio->sector = sect;
+                       still_degraded = 0;
+                       /* want to reconstruct this device */
+                       rb2 = r10_bio;
+                       sect = raid10_find_virt(conf, sector_nr, i);
+                       /* Unless we are doing a full sync, we only need
+                        * to recover the block if it is set in the bitmap
+                        */
+                       must_sync = bitmap_start_sync(mddev->bitmap, sect,
+                                                     &sync_blocks, 1);
+                       if (sync_blocks < max_sync)
+                               max_sync = sync_blocks;
+                       if (!must_sync &&
+                           !conf->fullsync) {
+                               /* yep, skip the sync_blocks here, but don't assume
+                                * that there will never be anything to do here
+                                */
+                               chunks_skipped = -1;
+                               continue;
+                       }
 
-                               raid10_find_phys(conf, r10_bio);
+                       r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                       raise_barrier(conf, rb2 != NULL);
+                       atomic_set(&r10_bio->remaining, 0);
 
-                               /* Need to check if the array will still be
-                                * degraded
-                                */
-                               for (j=0; j<conf->raid_disks; j++)
-                                       if (conf->mirrors[j].rdev == NULL ||
-                                           test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
-                                               still_degraded = 1;
-                                               break;
-                                       }
-
-                               must_sync = bitmap_start_sync(mddev->bitmap, sect,
-                                                             &sync_blocks, still_degraded);
-
-                               for (j=0; j<conf->copies;j++) {
-                                       int d = r10_bio->devs[j].devnum;
-                                       if (conf->mirrors[d].rdev &&
-                                           test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
-                                               /* This is where we read from */
-                                               bio = r10_bio->devs[0].bio;
-                                               bio->bi_next = biolist;
-                                               biolist = bio;
-                                               bio->bi_private = r10_bio;
-                                               bio->bi_end_io = end_sync_read;
-                                               bio->bi_rw = READ;
-                                               bio->bi_sector = r10_bio->devs[j].addr +
-                                                       conf->mirrors[d].rdev->data_offset;
-                                               bio->bi_bdev = conf->mirrors[d].rdev->bdev;
-                                               atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-                                               atomic_inc(&r10_bio->remaining);
-                                               /* and we write to 'i' */
-
-                                               for (k=0; k<conf->copies; k++)
-                                                       if (r10_bio->devs[k].devnum == i)
-                                                               break;
-                                               BUG_ON(k == conf->copies);
-                                               bio = r10_bio->devs[1].bio;
-                                               bio->bi_next = biolist;
-                                               biolist = bio;
-                                               bio->bi_private = r10_bio;
-                                               bio->bi_end_io = end_sync_write;
-                                               bio->bi_rw = WRITE;
-                                               bio->bi_sector = r10_bio->devs[k].addr +
-                                                       conf->mirrors[i].rdev->data_offset;
-                                               bio->bi_bdev = conf->mirrors[i].rdev->bdev;
-
-                                               r10_bio->devs[0].devnum = d;
-                                               r10_bio->devs[1].devnum = i;
+                       r10_bio->master_bio = (struct bio*)rb2;
+                       if (rb2)
+                               atomic_inc(&rb2->remaining);
+                       r10_bio->mddev = mddev;
+                       set_bit(R10BIO_IsRecover, &r10_bio->state);
+                       r10_bio->sector = sect;
 
-                                               break;
-                                       }
-                               }
-                               if (j == conf->copies) {
-                                       /* Cannot recover, so abort the recovery */
-                                       put_buf(r10_bio);
-                                       if (rb2)
-                                               atomic_dec(&rb2->remaining);
-                                       r10_bio = rb2;
-                                       if (!test_and_set_bit(MD_RECOVERY_INTR,
-                                                             &mddev->recovery))
-                                               printk(KERN_INFO "md/raid10:%s: insufficient "
-                                                      "working devices for recovery.\n",
-                                                      mdname(mddev));
+                       raid10_find_phys(conf, r10_bio);
+
+                       /* Need to check if the array will still be
+                        * degraded
+                        */
+                       for (j=0; j<conf->raid_disks; j++)
+                               if (conf->mirrors[j].rdev == NULL ||
+                                   test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
+                                       still_degraded = 1;
                                        break;
                                }
+
+                       must_sync = bitmap_start_sync(mddev->bitmap, sect,
+                                                     &sync_blocks, still_degraded);
+
+                       for (j=0; j<conf->copies;j++) {
+                               int d = r10_bio->devs[j].devnum;
+                               if (!conf->mirrors[d].rdev ||
+                                   !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
+                                       continue;
+                               /* This is where we read from */
+                               bio = r10_bio->devs[0].bio;
+                               bio->bi_next = biolist;
+                               biolist = bio;
+                               bio->bi_private = r10_bio;
+                               bio->bi_end_io = end_sync_read;
+                               bio->bi_rw = READ;
+                               bio->bi_sector = r10_bio->devs[j].addr +
+                                       conf->mirrors[d].rdev->data_offset;
+                               bio->bi_bdev = conf->mirrors[d].rdev->bdev;
+                               atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+                               atomic_inc(&r10_bio->remaining);
+                               /* and we write to 'i' */
+
+                               for (k=0; k<conf->copies; k++)
+                                       if (r10_bio->devs[k].devnum == i)
+                                               break;
+                               BUG_ON(k == conf->copies);
+                               bio = r10_bio->devs[1].bio;
+                               bio->bi_next = biolist;
+                               biolist = bio;
+                               bio->bi_private = r10_bio;
+                               bio->bi_end_io = end_sync_write;
+                               bio->bi_rw = WRITE;
+                               bio->bi_sector = r10_bio->devs[k].addr +
+                                       conf->mirrors[i].rdev->data_offset;
+                               bio->bi_bdev = conf->mirrors[i].rdev->bdev;
+
+                               r10_bio->devs[0].devnum = d;
+                               r10_bio->devs[1].devnum = i;
+
+                               break;
+                       }
+                       if (j == conf->copies) {
+                               /* Cannot recover, so abort the recovery */
+                               put_buf(r10_bio);
+                               if (rb2)
+                                       atomic_dec(&rb2->remaining);
+                               r10_bio = rb2;
+                               if (!test_and_set_bit(MD_RECOVERY_INTR,
+                                                     &mddev->recovery))
+                                       printk(KERN_INFO "md/raid10:%s: insufficient "
+                                              "working devices for recovery.\n",
+                                              mdname(mddev));
+                               break;
                        }
+               }
                if (biolist == NULL) {
                        while (r10_bio) {
                                r10bio_t *rb2 = r10_bio;
@@ -1977,7 +1956,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 
                if (!bitmap_start_sync(mddev->bitmap, sector_nr,
                                       &sync_blocks, mddev->degraded) &&
-                   !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+                   !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
+                                                &mddev->recovery)) {
                        /* We can skip this block */
                        *skipped = 1;
                        return sync_blocks + sectors_skipped;
@@ -2022,7 +2002,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                        for (i=0; i<conf->copies; i++) {
                                int d = r10_bio->devs[i].devnum;
                                if (r10_bio->devs[i].bio->bi_end_io)
-                                       rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+                                       rdev_dec_pending(conf->mirrors[d].rdev,
+                                                        mddev);
                        }
                        put_buf(r10_bio);
                        biolist = NULL;
@@ -2047,26 +2028,27 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
        do {
                struct page *page;
                int len = PAGE_SIZE;
-               disk = 0;
                if (sector_nr + (len>>9) > max_sector)
                        len = (max_sector - sector_nr) << 9;
                if (len == 0)
                        break;
                for (bio= biolist ; bio ; bio=bio->bi_next) {
+                       struct bio *bio2;
                        page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
-                       if (bio_add_page(bio, page, len, 0) == 0) {
-                               /* stop here */
-                               struct bio *bio2;
-                               bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
-                               for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
-                                       /* remove last page from this bio */
-                                       bio2->bi_vcnt--;
-                                       bio2->bi_size -= len;
-                                       bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
-                               }
-                               goto bio_full;
+                       if (bio_add_page(bio, page, len, 0))
+                               continue;
+
+                       /* stop here */
+                       bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
+                       for (bio2 = biolist;
+                            bio2 && bio2 != bio;
+                            bio2 = bio2->bi_next) {
+                               /* remove last page from this bio */
+                               bio2->bi_vcnt--;
+                               bio2->bi_size -= len;
+                               bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
                        }
-                       disk = i;
+                       goto bio_full;
                }
                nr_sectors += len>>9;
                sector_nr += len>>9;
index 49bf5f8914353a1c7972beaa27d6b5caee1031cc..34dd54539f7b6b1640764c80d770d526abbe706c 100644 (file)
@@ -1700,27 +1700,25 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
        raid5_conf_t *conf = mddev->private;
        pr_debug("raid456: error called\n");
 
-       if (!test_bit(Faulty, &rdev->flags)) {
-               set_bit(MD_CHANGE_DEVS, &mddev->flags);
-               if (test_and_clear_bit(In_sync, &rdev->flags)) {
-                       unsigned long flags;
-                       spin_lock_irqsave(&conf->device_lock, flags);
-                       mddev->degraded++;
-                       spin_unlock_irqrestore(&conf->device_lock, flags);
-                       /*
-                        * if recovery was running, make sure it aborts.
-                        */
-                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-               }
-               set_bit(Faulty, &rdev->flags);
-               printk(KERN_ALERT
-                      "md/raid:%s: Disk failure on %s, disabling device.\n"
-                      "md/raid:%s: Operation continuing on %d devices.\n",
-                      mdname(mddev),
-                      bdevname(rdev->bdev, b),
-                      mdname(mddev),
-                      conf->raid_disks - mddev->degraded);
+       if (test_and_clear_bit(In_sync, &rdev->flags)) {
+               unsigned long flags;
+               spin_lock_irqsave(&conf->device_lock, flags);
+               mddev->degraded++;
+               spin_unlock_irqrestore(&conf->device_lock, flags);
+               /*
+                * if recovery was running, make sure it aborts.
+                */
+               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        }
+       set_bit(Faulty, &rdev->flags);
+       set_bit(MD_CHANGE_DEVS, &mddev->flags);
+       printk(KERN_ALERT
+              "md/raid:%s: Disk failure on %s, disabling device.\n"
+              "md/raid:%s: Operation continuing on %d devices.\n",
+              mdname(mddev),
+              bdevname(rdev->bdev, b),
+              mdname(mddev),
+              conf->raid_disks - mddev->degraded);
 }
 
 /*
@@ -5391,7 +5389,8 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
                return -EINVAL;
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
-       if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
+       if (sectors > mddev->dev_sectors &&
+           mddev->recovery_cp > mddev->dev_sectors) {
                mddev->recovery_cp = mddev->dev_sectors;
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        }
index fc5ccd8c923aef46e86343cd09bbd3fa64a737a8..21c52e3b522e18df7ba3ff30412fc6f9650991af 100644 (file)
@@ -1320,14 +1320,10 @@ static int cmp_read(struct firedtv *fdtv, u64 addr, __be32 *data)
 {
        int ret;
 
-       mutex_lock(&fdtv->avc_mutex);
-
        ret = fdtv_read(fdtv, addr, data);
        if (ret < 0)
                dev_err(fdtv->device, "CMP: read I/O error\n");
 
-       mutex_unlock(&fdtv->avc_mutex);
-
        return ret;
 }
 
@@ -1335,18 +1331,9 @@ static int cmp_lock(struct firedtv *fdtv, u64 addr, __be32 data[])
 {
        int ret;
 
-       mutex_lock(&fdtv->avc_mutex);
-
-       /* data[] is stack-allocated and should not be DMA-mapped. */
-       memcpy(fdtv->avc_data, data, 8);
-
-       ret = fdtv_lock(fdtv, addr, fdtv->avc_data);
+       ret = fdtv_lock(fdtv, addr, data);
        if (ret < 0)
                dev_err(fdtv->device, "CMP: lock I/O error\n");
-       else
-               memcpy(data, fdtv->avc_data, 8);
-
-       mutex_unlock(&fdtv->avc_mutex);
 
        return ret;
 }
index 8022b743af91a6ff0cba565b9b9438296ed5e1ef..864b6274c72901b9f02323138355a0a44869baea 100644 (file)
@@ -125,6 +125,7 @@ static void handle_iso(struct fw_iso_context *context, u32 cycle,
 
                i = (i + 1) & (N_PACKETS - 1);
        }
+       fw_iso_context_queue_flush(ctx->context);
        ctx->current_packet = i;
 }
 
index 4b5e0ed49ed85bafa458edc577ab64844e1df884..a485f7fdaf37a51c26ae01bf42e5d5f0a48ec222 100644 (file)
@@ -15,6 +15,7 @@
  * Costa Mesa, CA 92626
  */
 
+#include <linux/prefetch.h>
 #include "be.h"
 #include "be_cmds.h"
 #include <asm/div64.h>
index e588511f47fb0560a4aa1f5e31b3b18c9a238ba8..7d25a97d33f6fd2daffd467e9be3984dd8153bda 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/if_vlan.h>
 #include <linux/if_ether.h>
 #include <linux/ip.h>
+#include <linux/prefetch.h>
 
 #include "bnad.h"
 #include "bna.h"
index ca2bbc0e5d48ca455141cbf718132b721c51008f..64d01e728a9db1ceb56c7ece03d295745ba83f21 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
 #include <linux/firmware.h>
+#include <linux/prefetch.h>
 #include "bnx2x_cmn.h"
 
 #include "bnx2x_init.h"
index b948ea737550eab72234f3abe40b306bfc619bcb..58380d240619dc232b36c577268b1752606c455c 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include "cpl5_cmd.h"
 #include "sge.h"
index cde59b4e5ef803844e97de4f898105deae85a093..11a92afdf982ef62be6a6097a88f768e9b3d599e 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/prefetch.h>
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define BCM_VLAN 1
 #endif
index cba1401377ab2351c515cbc2272b38506cbf887b..3f562ba2f0c9c70a81a94f52711929d26b6a46e5 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/tcp.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/arp.h>
 #include "common.h"
 #include "regs.h"
index 75a4b0fa19eeee7489a54e3cce1d995d70acc985..56adf448b9fe85664d77f60e0e7106c4262093bb 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/ip.h>
 #include <linux/dma-mapping.h>
 #include <linux/jiffies.h>
+#include <linux/prefetch.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
 #include "cxgb4.h"
index 5182960e29fd9dde94f7d02b654a8b6754bdcc37..5fd75fdaa631bd930fc96986ee0649b9d94729eb 100644 (file)
@@ -41,6 +41,7 @@
 #include <net/ipv6.h>
 #include <net/tcp.h>
 #include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
 
 #include "t4vf_common.h"
 #include "t4vf_defs.h"
index c18cb8e883dd3f1d1e4f7eff8fa298c77c879a5b..76e8af00d86dab7d79a65842dddadf6cfed337c8 100644 (file)
@@ -29,6 +29,7 @@
 #include "e1000.h"
 #include <net/ip6_checksum.h>
 #include <linux/io.h>
+#include <linux/prefetch.h>
 
 /* Intel Media SOC GbE MDIO physical base address */
 static unsigned long ce4100_gbe_mdio_base_phy;
index 0939040305fac51f2bc7d0d0fcd653acbec3197b..d9600566a1fcc24d9d36fac4a2b41faab60fc8f3 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/pm_qos_params.h>
 #include <linux/pm_runtime.h>
 #include <linux/aer.h>
+#include <linux/prefetch.h>
 
 #include "e1000.h"
 
index 38104734a3be82b0fb4480526295510a4769e8bf..fddff8ec8cfd8e4e8a47e2147db42378ba2fec5b 100644 (file)
@@ -29,6 +29,7 @@
 #ifndef __EHEA_QMR_H__
 #define __EHEA_QMR_H__
 
+#include <linux/prefetch.h>
 #include "ehea.h"
 #include "ehea_hw.h"
 
index 3d99b0f1a236a240bde0e8ccd85ed9efc7b060e2..2f433fbfca0cfa30e8e95fd38aefb901792fad17 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/rtnetlink.h>
+#include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
 
 #include "cq_enet_desc.h"
index d09e8b0add0105cd94489faac70f34ddc01d8bce..537b6957bb79d68d5add833b4248506e868d0d19 100644 (file)
@@ -64,6 +64,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/prefetch.h>
 #include  <linux/io.h>
 
 #include <asm/irq.h>
index ce7838e55827b2e639f999d0b974b074bdaff7cb..18fccf913635e671fbca0d321c73aa4c484e2f42 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
 #include <linux/aer.h>
+#include <linux/prefetch.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
index 1d04ca6fdaeac1e952e514367e6d69a144c9cac3..1c77fb3bf4ae19ab38521f5a7d7bc71d376c7fc7 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/prefetch.h>
 
 #include "igbvf.h"
 
index 0f681ac2da8dc54e721ad5565ecee38f46b3a092..6a130eb51cfaef41c556f81b5f19bfc26a3f926b 100644 (file)
@@ -28,6 +28,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/prefetch.h>
 #include "ixgb.h"
 
 char ixgb_driver_name[] = "ixgb";
index fa01b0b03b7740a8e746332eeb64f998444b7c5e..08e8e25c159d759d41ec35a64426bd0cec3708a0 100644 (file)
@@ -41,6 +41,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
 
 #include "ixgbe.h"
index d7ab202fb95c084baa785074a11f7e64c2777889..28d3cb21d3761a0d97b491aafffb4136f92db18c 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/prefetch.h>
 
 #include "ixgbevf.h"
 
index b1358f79ba0a9a1a0a4fcb4b07efbb012e9ffcad..bf84849600ce6e141dc7a6c84579f2ea6c315eaa 100644 (file)
@@ -65,6 +65,7 @@
 #include <linux/io.h>
 #include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/checksum.h>
 #include <net/ip.h>
 #include <net/tcp.h>
index 828e97cacdbfe33bc9351dea24f7d1b894f9e8af..9ec112ca62e4f71fa6368cbe3390dd5922fd05ae 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/tcp.h>
 #include <net/checksum.h>
 #include <linux/inet_lro.h>
+#include <linux/prefetch.h>
 
 #include <asm/irq.h>
 #include <asm/firmware.h>
index c2476fd965737c68c5e1b9f13cd2cb6ace48e609..eac3c5ca9731b8d9f15819a801654a8e76fd4917 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
+#include <linux/prefetch.h>
 
 #define DRV_VERSION     "1.00"
 const char pch_driver_version[] = DRV_VERSION;
index d495a6859fd9ff37371bb2354319111e91f12c43..771bb614ccc99bf06682a6824f3e0309c6074aa4 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/if_vlan.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include <linux/prefetch.h>
 
 #include "qla3xxx.h"
 
index 6c9d124cfc76a9e8b4627e64534289d5bf0b58e4..930ae45457bbc11df0316f07ab803a347fc5b75d 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
 
 #include "qlge.h"
index 04f4e6086cd05bda8ce0e9c927b1b54af3d0dd3a..ef1ce2ebeb4a6784ecca5d3c10f7ca084308ab9a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
 #include <linux/pci-aspm.h>
+#include <linux/prefetch.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
index 89cfee7e864315eb119acee8dbbb34ce42ddaa2b..a9a5f5ed19c640a7518b0a7cb72832718485a785 100644 (file)
@@ -78,6 +78,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/tcp.h>
 
 #include <asm/system.h>
index d96d2f7a3f14162592d2b9dd2121ace90693453e..68d50429ddf3bfa1295491c0e042807aa503f856 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/prefetch.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
index b7dc891b4461ce9357c3a54c25fdefa583b1146a..62e43649466eea9ed63676f52c9acf464ec05257 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
+#include <linux/prefetch.h>
 #include <net/ip.h>
 #include <net/checksum.h>
 #include "net_driver.h"
index 52a48cb75440abecaaf18b81f3aab26dd63b2169..f4be5c78ebfd28a8b0aad41197f425ba8c5a8703 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/mii.h>
 #include <linux/slab.h>
 #include <linux/dmi.h>
+#include <linux/prefetch.h>
 #include <asm/irq.h>
 
 #include "skge.h"
index e15c4a0bb96dd668a0a173a761532fe00b285d69..e25e44a45c28b5580fc266d33fa599ca2a8a7db6 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include "stmmac.h"
 
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
index 7ca51cebcddd832b7e4f7c69ab4589da50fdfc03..4a55a162dfe6409ea11dbe858ea86721bd40aa9c 100644 (file)
@@ -47,6 +47,7 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
 #include <linux/phy.h>
 #include <linux/workqueue.h>
 #include <linux/platform_device.h>
+#include <linux/prefetch.h>
 #include <asm/io.h>
 #include <asm/byteorder.h>
 
index fc837cf6bd4d1e052e689a85375c0bf6f735cb23..8ab870a2ad0239105f579558bb5c077ffb7dea6a 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/etherdevice.h>
 #include <linux/firmware.h>
 #include <linux/net_tstamp.h>
+#include <linux/prefetch.h>
 #include "vxge-main.h"
 #include "vxge-reg.h"
 
index 2638b8d97b8f016c0005d8d123d3ce225a182ffd..f93517055162909cdf93459956ea161f93200609 100644 (file)
@@ -12,6 +12,7 @@
  * Copyright(c) 2002-2010 Exar Corp.
  ******************************************************************************/
 #include <linux/etherdevice.h>
+#include <linux/prefetch.h>
 
 #include "vxge-traffic.h"
 #include "vxge-config.h"
index e6a8d8c0101d6185a9adc1645e45e3d7feca7c81..6d139679eb7acc0e1f688e8bd3fb8b0c3c6f3a3a 100644 (file)
@@ -1562,6 +1562,17 @@ config FB_VIA_DIRECT_PROCFS
          correct output device configuration.
          Its use is strongly discouraged.
 
+config FB_VIA_X_COMPATIBILITY
+       bool "X server compatibility"
+       depends on FB_VIA
+       default n
+       help
+         This option reduces the functionality (power saving, ...) of the
+         framebuffer to avoid negative impact on the OpenChrome X server.
+         If you use any X server other than fbdev you should enable this
+         otherwise it should be safe to disable it and allow using all
+         features.
+
 endif
 
 config FB_NEOMAGIC
index 96f01ee2a412f1df7aadc330169525c5df644ed8..5108136e87764c9f2cfcf2f4a7d7d61c630ba5e1 100644 (file)
@@ -6,4 +6,4 @@ obj-$(CONFIG_FB_VIA) += viafb.o
 
 viafb-y        :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o \
        via_utility.o vt1636.o global.o tblDPASetting.o viamode.o \
-       via-core.o via-gpio.o via_modesetting.o
+       via-core.o via-gpio.o via_modesetting.o via_clock.o
index 29d70244a21fe53c75d314b61221e1b9dba29c0a..3ebf20c06eef0a4a2f62212fed1952c378aa0806 100644 (file)
@@ -137,17 +137,11 @@ struct chip_information {
        struct lvds_chip_information lvds_chip_info2;
 };
 
-struct crt_setting_information {
-       int iga_path;
-};
-
 struct tmds_setting_information {
        int iga_path;
        int h_active;
        int v_active;
        int max_pixel_clock;
-       int max_hres;
-       int max_vres;
 };
 
 struct lvds_setting_information {
index 41ca198b5098198ee46edc8a3fce58106e894e69..b1f364745ca007d8774566955d6f0d8bdf28b87b 100644 (file)
@@ -28,17 +28,11 @@ static int tmds_register_read_bytes(int index, u8 *buff, int buff_len);
 static void __devinit dvi_get_panel_size_from_DDCv1(
        struct tmds_chip_information *tmds_chip,
        struct tmds_setting_information *tmds_setting);
-static void __devinit dvi_get_panel_size_from_DDCv2(
-       struct tmds_chip_information *tmds_chip,
-       struct tmds_setting_information *tmds_setting);
 static int viafb_dvi_query_EDID(void);
 
-static int check_tmds_chip(int device_id_subaddr, int device_id)
+static inline bool check_tmds_chip(int device_id_subaddr, int device_id)
 {
-       if (tmds_register_read(device_id_subaddr) == device_id)
-               return OK;
-       else
-               return FAIL;
+       return tmds_register_read(device_id_subaddr) == device_id;
 }
 
 void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
@@ -47,22 +41,13 @@ void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
        DEBUG_MSG(KERN_INFO "viafb_init_dvi_size()\n");
 
        viafb_dvi_sense();
-       switch (viafb_dvi_query_EDID()) {
-       case 1:
+       if (viafb_dvi_query_EDID() == 1)
                dvi_get_panel_size_from_DDCv1(tmds_chip, tmds_setting);
-               break;
-       case 2:
-               dvi_get_panel_size_from_DDCv2(tmds_chip, tmds_setting);
-               break;
-       default:
-               printk(KERN_WARNING "viafb_init_dvi_size: DVI panel size undetected!\n");
-               break;
-       }
 
        return;
 }
 
-int __devinit viafb_tmds_trasmitter_identify(void)
+bool __devinit viafb_tmds_trasmitter_identify(void)
 {
        unsigned char sr2a = 0, sr1e = 0, sr3e = 0;
 
@@ -101,7 +86,7 @@ int __devinit viafb_tmds_trasmitter_identify(void)
        viaparinfo->chip_info->
                tmds_chip_info.tmds_chip_slave_addr = VT1632_TMDS_I2C_ADDR;
        viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_31;
-       if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID) != FAIL) {
+       if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID)) {
                /*
                 * Currently only support 12bits,dual edge,add 24bits mode later
                 */
@@ -112,11 +97,10 @@ int __devinit viafb_tmds_trasmitter_identify(void)
                          viaparinfo->chip_info->tmds_chip_info.tmds_chip_name);
                DEBUG_MSG(KERN_INFO "\n %2d",
                          viaparinfo->chip_info->tmds_chip_info.i2c_port);
-               return OK;
+               return true;
        } else {
                viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_2C;
-               if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID)
-                   != FAIL) {
+               if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID)) {
                        tmds_register_write(0x08, 0x3b);
                        DEBUG_MSG(KERN_INFO "\n VT1632 TMDS ! \n");
                        DEBUG_MSG(KERN_INFO "\n %2d",
@@ -125,7 +109,7 @@ int __devinit viafb_tmds_trasmitter_identify(void)
                        DEBUG_MSG(KERN_INFO "\n %2d",
                                  viaparinfo->chip_info->
                                  tmds_chip_info.i2c_port);
-                       return OK;
+                       return true;
                }
        }
 
@@ -135,7 +119,7 @@ int __devinit viafb_tmds_trasmitter_identify(void)
            ((viafb_display_hardware_layout == HW_LAYOUT_DVI_ONLY) ||
             (viafb_display_hardware_layout == HW_LAYOUT_LCD_DVI))) {
                DEBUG_MSG(KERN_INFO "\n Integrated TMDS ! \n");
-               return OK;
+               return true;
        }
 
        switch (viaparinfo->chip_info->gfx_chip_name) {
@@ -159,7 +143,7 @@ int __devinit viafb_tmds_trasmitter_identify(void)
                tmds_chip_info.tmds_chip_name = NON_TMDS_TRANSMITTER;
        viaparinfo->chip_info->tmds_chip_info.
                tmds_chip_slave_addr = VT1632_TMDS_I2C_ADDR;
-       return FAIL;
+       return false;
 }
 
 static void tmds_register_write(int index, u8 data)
@@ -306,12 +290,7 @@ static int viafb_dvi_query_EDID(void)
                return EDID_VERSION_1;  /* Found EDID1 Table */
        }
 
-       data0 = (u8) tmds_register_read(0x00);
-       viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr = restore;
-       if (data0 == 0x20)
-               return EDID_VERSION_2;  /* Found EDID2 Table */
-       else
-               return false;
+       return false;
 }
 
 /* Get Panel Size Using EDID1 Table */
@@ -319,50 +298,15 @@ static void __devinit dvi_get_panel_size_from_DDCv1(
        struct tmds_chip_information *tmds_chip,
        struct tmds_setting_information *tmds_setting)
 {
-       int i, max_h = 0, tmp, restore;
-       unsigned char rData;
+       int i, restore;
        unsigned char EDID_DATA[18];
 
        DEBUG_MSG(KERN_INFO "\n dvi_get_panel_size_from_DDCv1 \n");
 
        restore = tmds_chip->tmds_chip_slave_addr;
        tmds_chip->tmds_chip_slave_addr = 0xA0;
-
-       rData = tmds_register_read(0x23);
-       if (rData & 0x3C)
-               max_h = 640;
-       if (rData & 0xC0)
-               max_h = 720;
-       if (rData & 0x03)
-               max_h = 800;
-
-       rData = tmds_register_read(0x24);
-       if (rData & 0xC0)
-               max_h = 800;
-       if (rData & 0x1E)
-               max_h = 1024;
-       if (rData & 0x01)
-               max_h = 1280;
-
        for (i = 0x25; i < 0x6D; i++) {
                switch (i) {
-               case 0x26:
-               case 0x28:
-               case 0x2A:
-               case 0x2C:
-               case 0x2E:
-               case 0x30:
-               case 0x32:
-               case 0x34:
-                       rData = tmds_register_read(i);
-                       if (rData == 1)
-                               break;
-                       /* data = (data + 31) * 8 */
-                       tmp = (rData + 31) << 3;
-                       if (tmp > max_h)
-                               max_h = tmp;
-                       break;
-
                case 0x36:
                case 0x48:
                case 0x5A:
@@ -383,91 +327,11 @@ static void __devinit dvi_get_panel_size_from_DDCv1(
                }
        }
 
-       tmds_setting->max_hres = max_h;
-       switch (max_h) {
-       case 640:
-               tmds_setting->max_vres = 480;
-               break;
-       case 800:
-               tmds_setting->max_vres = 600;
-               break;
-       case 1024:
-               tmds_setting->max_vres = 768;
-               break;
-       case 1280:
-               tmds_setting->max_vres = 1024;
-               break;
-       case 1400:
-               tmds_setting->max_vres = 1050;
-               break;
-       case 1440:
-               tmds_setting->max_vres = 1050;
-               break;
-       case 1600:
-               tmds_setting->max_vres = 1200;
-               break;
-       case 1920:
-               tmds_setting->max_vres = 1080;
-               break;
-       default:
-               DEBUG_MSG(KERN_INFO "Unknown panel size max resolution = %d ! "
-                                        "set default panel size.\n", max_h);
-               break;
-       }
-
        DEBUG_MSG(KERN_INFO "DVI max pixelclock = %d\n",
                tmds_setting->max_pixel_clock);
        tmds_chip->tmds_chip_slave_addr = restore;
 }
 
-/* Get Panel Size Using EDID2 Table */
-static void __devinit dvi_get_panel_size_from_DDCv2(
-       struct tmds_chip_information *tmds_chip,
-       struct tmds_setting_information *tmds_setting)
-{
-       int restore;
-       unsigned char R_Buffer[2];
-
-       DEBUG_MSG(KERN_INFO "\n dvi_get_panel_size_from_DDCv2 \n");
-
-       restore = tmds_chip->tmds_chip_slave_addr;
-       tmds_chip->tmds_chip_slave_addr = 0xA2;
-
-       /* Horizontal: 0x76, 0x77 */
-       tmds_register_read_bytes(0x76, R_Buffer, 2);
-       tmds_setting->max_hres = R_Buffer[0] + (R_Buffer[1] << 8);
-
-       switch (tmds_setting->max_hres) {
-       case 640:
-               tmds_setting->max_vres = 480;
-               break;
-       case 800:
-               tmds_setting->max_vres = 600;
-               break;
-       case 1024:
-               tmds_setting->max_vres = 768;
-               break;
-       case 1280:
-               tmds_setting->max_vres = 1024;
-               break;
-       case 1400:
-               tmds_setting->max_vres = 1050;
-               break;
-       case 1440:
-               tmds_setting->max_vres = 1050;
-               break;
-       case 1600:
-               tmds_setting->max_vres = 1200;
-               break;
-       default:
-               DEBUG_MSG(KERN_INFO "Unknown panel size max resolution = %d! "
-                       "set default panel size.\n", tmds_setting->max_hres);
-               break;
-       }
-
-       tmds_chip->tmds_chip_slave_addr = restore;
-}
-
 /* If Disable DVI, turn off pad */
 void viafb_dvi_disable(void)
 {
index 2c525c0c1adb157ca3cd171bba9a45e997ab4894..f473dd010977ba1222da41a8f04878f124317a51 100644 (file)
@@ -56,7 +56,7 @@
 int viafb_dvi_sense(void);
 void viafb_dvi_disable(void);
 void viafb_dvi_enable(void);
-int __devinit viafb_tmds_trasmitter_identify(void);
+bool __devinit viafb_tmds_trasmitter_identify(void);
 void __devinit viafb_init_dvi_size(struct tmds_chip_information *tmds_chip,
        struct tmds_setting_information *tmds_setting);
 void viafb_dvi_set_mode(struct VideoModeTable *videoMode, int mode_bpp,
index 1ee511b733071b1641ea5eabff3e4c9073c7f98c..e10d8249534c682543f0a43cfb9398240e357219 100644 (file)
@@ -40,10 +40,6 @@ int viafb_hotplug_Yres = 480;
 int viafb_hotplug_bpp = 32;
 int viafb_hotplug_refresh = 60;
 int viafb_primary_dev = None_Device;
-unsigned int viafb_second_xres = 640;
-unsigned int viafb_second_yres = 480;
-unsigned int viafb_second_virtual_xres;
-unsigned int viafb_second_virtual_yres;
 int viafb_lcd_panel_id = LCD_PANEL_ID_MAXIMUM + 1;
 struct fb_info *viafbinfo;
 struct fb_info *viafbinfo1;
index 38ef5ac669539dc69191f65eea8f6d7923c5ebac..ff969dc3459352bbfea5df7dec88e111576584c3 100644 (file)
@@ -73,8 +73,6 @@ extern int viafb_hotplug_bpp;
 extern int viafb_hotplug_refresh;
 extern int viafb_primary_dev;
 
-extern unsigned int viafb_second_xres;
-extern unsigned int viafb_second_yres;
 extern int viafb_lcd_panel_id;
 
 #endif /* __GLOBAL_H__ */
index dc4c778877ceee51fdf128abe304f0fb3ce9a350..47b13535ed2b6279a568a3f7c0b7ed00edd2a8c3 100644 (file)
  */
 
 #include <linux/via-core.h>
+#include <asm/olpc.h>
 #include "global.h"
-
-static struct pll_config cle266_pll_config[] = {
-       {19, 4, 0},
-       {26, 5, 0},
-       {28, 5, 0},
-       {31, 5, 0},
-       {33, 5, 0},
-       {55, 5, 0},
-       {102, 5, 0},
-       {53, 6, 0},
-       {92, 6, 0},
-       {98, 6, 0},
-       {112, 6, 0},
-       {41, 7, 0},
-       {60, 7, 0},
-       {99, 7, 0},
-       {100, 7, 0},
-       {83, 8, 0},
-       {86, 8, 0},
-       {108, 8, 0},
-       {87, 9, 0},
-       {118, 9, 0},
-       {95, 12, 0},
-       {115, 12, 0},
-       {108, 13, 0},
-       {83, 17, 0},
-       {67, 20, 0},
-       {86, 20, 0},
-       {98, 20, 0},
-       {121, 24, 0},
-       {99, 29, 0},
-       {33, 3, 1},
-       {15, 4, 1},
-       {23, 4, 1},
-       {37, 5, 1},
-       {83, 5, 1},
-       {85, 5, 1},
-       {94, 5, 1},
-       {103, 5, 1},
-       {109, 5, 1},
-       {113, 5, 1},
-       {121, 5, 1},
-       {82, 6, 1},
-       {31, 7, 1},
-       {55, 7, 1},
-       {84, 7, 1},
-       {83, 8, 1},
-       {76, 9, 1},
-       {127, 9, 1},
-       {33, 4, 2},
-       {75, 4, 2},
-       {119, 4, 2},
-       {121, 4, 2},
-       {91, 5, 2},
-       {118, 5, 2},
-       {83, 6, 2},
-       {109, 6, 2},
-       {90, 7, 2},
-       {93, 2, 3},
-       {53, 3, 3},
-       {73, 4, 3},
-       {89, 4, 3},
-       {105, 4, 3},
-       {117, 4, 3},
-       {101, 5, 3},
-       {121, 5, 3},
-       {127, 5, 3},
-       {99, 7, 3}
+#include "via_clock.h"
+
+static struct pll_limit cle266_pll_limits[] = {
+       {19, 19, 4, 0},
+       {26, 102, 5, 0},
+       {53, 112, 6, 0},
+       {41, 100, 7, 0},
+       {83, 108, 8, 0},
+       {87, 118, 9, 0},
+       {95, 115, 12, 0},
+       {108, 108, 13, 0},
+       {83, 83, 17, 0},
+       {67, 98, 20, 0},
+       {121, 121, 24, 0},
+       {99, 99, 29, 0},
+       {33, 33, 3, 1},
+       {15, 23, 4, 1},
+       {37, 121, 5, 1},
+       {82, 82, 6, 1},
+       {31, 84, 7, 1},
+       {83, 83, 8, 1},
+       {76, 127, 9, 1},
+       {33, 121, 4, 2},
+       {91, 118, 5, 2},
+       {83, 109, 6, 2},
+       {90, 90, 7, 2},
+       {93, 93, 2, 3},
+       {53, 53, 3, 3},
+       {73, 117, 4, 3},
+       {101, 127, 5, 3},
+       {99, 99, 7, 3}
 };
 
-static struct pll_config k800_pll_config[] = {
-       {22, 2, 0},
-       {28, 3, 0},
-       {81, 3, 1},
-       {85, 3, 1},
-       {98, 3, 1},
-       {112, 3, 1},
-       {86, 4, 1},
-       {166, 4, 1},
-       {109, 5, 1},
-       {113, 5, 1},
-       {121, 5, 1},
-       {131, 5, 1},
-       {143, 5, 1},
-       {153, 5, 1},
-       {66, 3, 2},
-       {68, 3, 2},
-       {95, 3, 2},
-       {106, 3, 2},
-       {116, 3, 2},
-       {93, 4, 2},
-       {119, 4, 2},
-       {121, 4, 2},
-       {133, 4, 2},
-       {137, 4, 2},
-       {117, 5, 2},
-       {118, 5, 2},
-       {120, 5, 2},
-       {124, 5, 2},
-       {132, 5, 2},
-       {137, 5, 2},
-       {141, 5, 2},
-       {166, 5, 2},
-       {170, 5, 2},
-       {191, 5, 2},
-       {206, 5, 2},
-       {208, 5, 2},
-       {30, 2, 3},
-       {69, 3, 3},
-       {82, 3, 3},
-       {83, 3, 3},
-       {109, 3, 3},
-       {114, 3, 3},
-       {125, 3, 3},
-       {89, 4, 3},
-       {103, 4, 3},
-       {117, 4, 3},
-       {126, 4, 3},
-       {150, 4, 3},
-       {161, 4, 3},
-       {121, 5, 3},
-       {127, 5, 3},
-       {131, 5, 3},
-       {134, 5, 3},
-       {148, 5, 3},
-       {169, 5, 3},
-       {172, 5, 3},
-       {182, 5, 3},
-       {195, 5, 3},
-       {196, 5, 3},
-       {208, 5, 3},
-       {66, 2, 4},
-       {85, 3, 4},
-       {141, 4, 4},
-       {146, 4, 4},
-       {161, 4, 4},
-       {177, 5, 4}
+static struct pll_limit k800_pll_limits[] = {
+       {22, 22, 2, 0},
+       {28, 28, 3, 0},
+       {81, 112, 3, 1},
+       {86, 166, 4, 1},
+       {109, 153, 5, 1},
+       {66, 116, 3, 2},
+       {93, 137, 4, 2},
+       {117, 208, 5, 2},
+       {30, 30, 2, 3},
+       {69, 125, 3, 3},
+       {89, 161, 4, 3},
+       {121, 208, 5, 3},
+       {66, 66, 2, 4},
+       {85, 85, 3, 4},
+       {141, 161, 4, 4},
+       {177, 177, 5, 4}
 };
 
-static struct pll_config cx700_pll_config[] = {
-       {98, 3, 1},
-       {86, 4, 1},
-       {109, 5, 1},
-       {110, 5, 1},
-       {113, 5, 1},
-       {121, 5, 1},
-       {131, 5, 1},
-       {135, 5, 1},
-       {142, 5, 1},
-       {143, 5, 1},
-       {153, 5, 1},
-       {187, 5, 1},
-       {208, 5, 1},
-       {68, 2, 2},
-       {95, 3, 2},
-       {116, 3, 2},
-       {93, 4, 2},
-       {119, 4, 2},
-       {133, 4, 2},
-       {137, 4, 2},
-       {151, 4, 2},
-       {166, 4, 2},
-       {110, 5, 2},
-       {112, 5, 2},
-       {117, 5, 2},
-       {118, 5, 2},
-       {120, 5, 2},
-       {132, 5, 2},
-       {137, 5, 2},
-       {141, 5, 2},
-       {151, 5, 2},
-       {166, 5, 2},
-       {175, 5, 2},
-       {191, 5, 2},
-       {206, 5, 2},
-       {174, 7, 2},
-       {82, 3, 3},
-       {109, 3, 3},
-       {117, 4, 3},
-       {150, 4, 3},
-       {161, 4, 3},
-       {112, 5, 3},
-       {115, 5, 3},
-       {121, 5, 3},
-       {127, 5, 3},
-       {129, 5, 3},
-       {131, 5, 3},
-       {134, 5, 3},
-       {138, 5, 3},
-       {148, 5, 3},
-       {157, 5, 3},
-       {169, 5, 3},
-       {172, 5, 3},
-       {190, 5, 3},
-       {195, 5, 3},
-       {196, 5, 3},
-       {208, 5, 3},
-       {141, 5, 4},
-       {150, 5, 4},
-       {166, 5, 4},
-       {176, 5, 4},
-       {177, 5, 4},
-       {183, 5, 4},
-       {202, 5, 4}
+static struct pll_limit cx700_pll_limits[] = {
+       {98, 98, 3, 1},
+       {86, 86, 4, 1},
+       {109, 208, 5, 1},
+       {68, 68, 2, 2},
+       {95, 116, 3, 2},
+       {93, 166, 4, 2},
+       {110, 206, 5, 2},
+       {174, 174, 7, 2},
+       {82, 109, 3, 3},
+       {117, 161, 4, 3},
+       {112, 208, 5, 3},
+       {141, 202, 5, 4}
 };
 
-static struct pll_config vx855_pll_config[] = {
-       {86, 4, 1},
-       {108, 5, 1},
-       {110, 5, 1},
-       {113, 5, 1},
-       {121, 5, 1},
-       {131, 5, 1},
-       {135, 5, 1},
-       {142, 5, 1},
-       {143, 5, 1},
-       {153, 5, 1},
-       {164, 5, 1},
-       {187, 5, 1},
-       {208, 5, 1},
-       {110, 5, 2},
-       {112, 5, 2},
-       {117, 5, 2},
-       {118, 5, 2},
-       {124, 5, 2},
-       {132, 5, 2},
-       {137, 5, 2},
-       {141, 5, 2},
-       {149, 5, 2},
-       {151, 5, 2},
-       {159, 5, 2},
-       {166, 5, 2},
-       {167, 5, 2},
-       {172, 5, 2},
-       {189, 5, 2},
-       {191, 5, 2},
-       {194, 5, 2},
-       {206, 5, 2},
-       {208, 5, 2},
-       {83, 3, 3},
-       {88, 3, 3},
-       {109, 3, 3},
-       {112, 3, 3},
-       {103, 4, 3},
-       {105, 4, 3},
-       {161, 4, 3},
-       {112, 5, 3},
-       {115, 5, 3},
-       {121, 5, 3},
-       {127, 5, 3},
-       {134, 5, 3},
-       {137, 5, 3},
-       {148, 5, 3},
-       {157, 5, 3},
-       {169, 5, 3},
-       {172, 5, 3},
-       {182, 5, 3},
-       {191, 5, 3},
-       {195, 5, 3},
-       {209, 5, 3},
-       {142, 4, 4},
-       {146, 4, 4},
-       {161, 4, 4},
-       {141, 5, 4},
-       {150, 5, 4},
-       {165, 5, 4},
-       {176, 5, 4}
+static struct pll_limit vx855_pll_limits[] = {
+       {86, 86, 4, 1},
+       {108, 208, 5, 1},
+       {110, 208, 5, 2},
+       {83, 112, 3, 3},
+       {103, 161, 4, 3},
+       {112, 209, 5, 3},
+       {142, 161, 4, 4},
+       {141, 176, 5, 4}
 };
 
 /* according to VIA Technologies these values are based on experiment */
@@ -308,6 +118,42 @@ static struct io_reg scaling_parameters[] = {
        {VIACR, CR87, 0xFF, 0x1F},      /* LCD Scaling Parameter 14 */
 };
 
+static struct io_reg common_vga[] = {
+       {VIACR, CR07, 0x10, 0x10}, /* [0] vertical total (bit 8)
+                                       [1] vertical display end (bit 8)
+                                       [2] vertical retrace start (bit 8)
+                                       [3] start vertical blanking (bit 8)
+                                       [4] line compare (bit 8)
+                                       [5] vertical total (bit 9)
+                                       [6] vertical display end (bit 9)
+                                       [7] vertical retrace start (bit 9) */
+       {VIACR, CR08, 0xFF, 0x00}, /* [0-4] preset row scan
+                                       [5-6] byte panning */
+       {VIACR, CR09, 0xDF, 0x40}, /* [0-4] max scan line
+                                       [5] start vertical blanking (bit 9)
+                                       [6] line compare (bit 9)
+                                       [7] scan doubling */
+       {VIACR, CR0A, 0xFF, 0x1E}, /* [0-4] cursor start
+                                       [5] cursor disable */
+       {VIACR, CR0B, 0xFF, 0x00}, /* [0-4] cursor end
+                                       [5-6] cursor skew */
+       {VIACR, CR0E, 0xFF, 0x00}, /* [0-7] cursor location (high) */
+       {VIACR, CR0F, 0xFF, 0x00}, /* [0-7] cursor location (low) */
+       {VIACR, CR11, 0xF0, 0x80}, /* [0-3] vertical retrace end
+                                       [6] memory refresh bandwidth
+                                       [7] CRTC register protect enable */
+       {VIACR, CR14, 0xFF, 0x00}, /* [0-4] underline location
+                                       [5] divide memory address clock by 4
+                                       [6] double word addressing */
+       {VIACR, CR17, 0xFF, 0x63}, /* [0-1] mapping of display address 13-14
+                                       [2] divide scan line clock by 2
+                                       [3] divide memory address clock by 2
+                                       [5] address wrap
+                                       [6] byte mode select
+                                       [7] sync enable */
+       {VIACR, CR18, 0xFF, 0xFF}, /* [0-7] line compare */
+};
+
 static struct fifo_depth_select display_fifo_depth_reg = {
        /* IGA1 FIFO Depth_Select */
        {IGA1_FIFO_DEPTH_SELECT_REG_NUM, {{SR17, 0, 7} } },
@@ -676,6 +522,9 @@ static struct via_device_mapping device_mapping[] = {
        {VIA_LVDS2, "LVDS2"}
 };
 
+/* structure with function pointers to support clock control */
+static struct via_clock clock;
+
 static void load_fix_bit_crtc_reg(void);
 static void __devinit init_gfx_chip_info(int chip_type);
 static void __devinit init_tmds_chip_info(void);
@@ -770,13 +619,14 @@ static u32 get_lcd_devices(int output_interface)
 /*Set IGA path for each device*/
 void viafb_set_iga_path(void)
 {
+       int crt_iga_path = 0;
 
        if (viafb_SAMM_ON == 1) {
                if (viafb_CRT_ON) {
                        if (viafb_primary_dev == CRT_Device)
-                               viaparinfo->crt_setting_info->iga_path = IGA1;
+                               crt_iga_path = IGA1;
                        else
-                               viaparinfo->crt_setting_info->iga_path = IGA2;
+                               crt_iga_path = IGA2;
                }
 
                if (viafb_DVI_ON) {
@@ -793,8 +643,7 @@ void viafb_set_iga_path(void)
                                        UNICHROME_CLE266)) {
                                        viaparinfo->
                                        lvds_setting_info->iga_path = IGA2;
-                                       viaparinfo->
-                                       crt_setting_info->iga_path = IGA1;
+                                       crt_iga_path = IGA1;
                                        viaparinfo->
                                        tmds_setting_info->iga_path = IGA1;
                                } else
@@ -814,10 +663,10 @@ void viafb_set_iga_path(void)
                viafb_SAMM_ON = 0;
 
                if (viafb_CRT_ON && viafb_LCD_ON) {
-                       viaparinfo->crt_setting_info->iga_path = IGA1;
+                       crt_iga_path = IGA1;
                        viaparinfo->lvds_setting_info->iga_path = IGA2;
                } else if (viafb_CRT_ON && viafb_DVI_ON) {
-                       viaparinfo->crt_setting_info->iga_path = IGA1;
+                       crt_iga_path = IGA1;
                        viaparinfo->tmds_setting_info->iga_path = IGA2;
                } else if (viafb_LCD_ON && viafb_DVI_ON) {
                        viaparinfo->tmds_setting_info->iga_path = IGA1;
@@ -826,7 +675,7 @@ void viafb_set_iga_path(void)
                        viaparinfo->lvds_setting_info->iga_path = IGA2;
                        viaparinfo->lvds_setting_info2->iga_path = IGA2;
                } else if (viafb_CRT_ON) {
-                       viaparinfo->crt_setting_info->iga_path = IGA1;
+                       crt_iga_path = IGA1;
                } else if (viafb_LCD_ON) {
                        viaparinfo->lvds_setting_info->iga_path = IGA2;
                } else if (viafb_DVI_ON) {
@@ -837,7 +686,7 @@ void viafb_set_iga_path(void)
        viaparinfo->shared->iga1_devices = 0;
        viaparinfo->shared->iga2_devices = 0;
        if (viafb_CRT_ON) {
-               if (viaparinfo->crt_setting_info->iga_path == IGA1)
+               if (crt_iga_path == IGA1)
                        viaparinfo->shared->iga1_devices |= VIA_CRT;
                else
                        viaparinfo->shared->iga2_devices |= VIA_CRT;
@@ -875,6 +724,10 @@ void viafb_set_iga_path(void)
                                viaparinfo->chip_info->
                                lvds_chip_info2.output_interface);
        }
+
+       /* looks like the OLPC has its display wired to DVP1 and LVDS2 */
+       if (machine_is_olpc())
+               viaparinfo->shared->iga2_devices = VIA_DVP1 | VIA_LVDS2;
 }
 
 static void set_color_register(u8 index, u8 red, u8 green, u8 blue)
@@ -1162,25 +1015,17 @@ void via_odev_to_seq(struct seq_file *m, u32 odev)
 
 static void load_fix_bit_crtc_reg(void)
 {
+       viafb_unlock_crt();
+
        /* always set to 1 */
        viafb_write_reg_mask(CR03, VIACR, 0x80, BIT7);
        /* line compare should set all bits = 1 (extend modes) */
-       viafb_write_reg(CR18, VIACR, 0xff);
-       /* line compare should set all bits = 1 (extend modes) */
-       viafb_write_reg_mask(CR07, VIACR, 0x10, BIT4);
-       /* line compare should set all bits = 1 (extend modes) */
-       viafb_write_reg_mask(CR09, VIACR, 0x40, BIT6);
-       /* line compare should set all bits = 1 (extend modes) */
        viafb_write_reg_mask(CR35, VIACR, 0x10, BIT4);
        /* line compare should set all bits = 1 (extend modes) */
        viafb_write_reg_mask(CR33, VIACR, 0x06, BIT0 + BIT1 + BIT2);
        /*viafb_write_reg_mask(CR32, VIACR, 0x01, BIT0); */
-       /* extend mode always set to e3h */
-       viafb_write_reg(CR17, VIACR, 0xe3);
-       /* extend mode always set to 0h */
-       viafb_write_reg(CR08, VIACR, 0x00);
-       /* extend mode always set to 0h */
-       viafb_write_reg(CR14, VIACR, 0x00);
+
+       viafb_lock_crt();
 
        /* If K8M800, enable Prefetch Mode. */
        if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800)
@@ -1601,69 +1446,54 @@ void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active)
 
 }
 
-static u32 cle266_encode_pll(struct pll_config pll)
-{
-       return (pll.multiplier << 8)
-               | (pll.rshift << 6)
-               | pll.divisor;
-}
-
-static u32 k800_encode_pll(struct pll_config pll)
-{
-       return ((pll.divisor - 2) << 16)
-               | (pll.rshift << 10)
-               | (pll.multiplier - 2);
-}
-
-static u32 vx855_encode_pll(struct pll_config pll)
-{
-       return (pll.divisor << 16)
-               | (pll.rshift << 10)
-               | pll.multiplier;
-}
-
-static inline u32 get_pll_internal_frequency(u32 ref_freq,
-       struct pll_config pll)
-{
-       return ref_freq / pll.divisor * pll.multiplier;
-}
-
-static inline u32 get_pll_output_frequency(u32 ref_freq, struct pll_config pll)
-{
-       return get_pll_internal_frequency(ref_freq, pll)>>pll.rshift;
-}
-
-static struct pll_config get_pll_config(struct pll_config *config, int size,
+static struct via_pll_config get_pll_config(struct pll_limit *limits, int size,
        int clk)
 {
-       struct pll_config best = config[0];
+       struct via_pll_config cur, up, down, best = {0, 1, 0};
        const u32 f0 = 14318180; /* X1 frequency */
-       int i;
-
-       for (i = 1; i < size; i++) {
-               if (abs(get_pll_output_frequency(f0, config[i]) - clk)
-                       < abs(get_pll_output_frequency(f0, best) - clk))
-                       best = config[i];
+       int i, f;
+
+       for (i = 0; i < size; i++) {
+               cur.rshift = limits[i].rshift;
+               cur.divisor = limits[i].divisor;
+               cur.multiplier = clk / ((f0 / cur.divisor)>>cur.rshift);
+               f = abs(get_pll_output_frequency(f0, cur) - clk);
+               up = down = cur;
+               up.multiplier++;
+               down.multiplier--;
+               if (abs(get_pll_output_frequency(f0, up) - clk) < f)
+                       cur = up;
+               else if (abs(get_pll_output_frequency(f0, down) - clk) < f)
+                       cur = down;
+
+               if (cur.multiplier < limits[i].multiplier_min)
+                       cur.multiplier = limits[i].multiplier_min;
+               else if (cur.multiplier > limits[i].multiplier_max)
+                       cur.multiplier = limits[i].multiplier_max;
+
+               f = abs(get_pll_output_frequency(f0, cur) - clk);
+               if (f < abs(get_pll_output_frequency(f0, best) - clk))
+                       best = cur;
        }
 
        return best;
 }
 
-u32 viafb_get_clk_value(int clk)
+static struct via_pll_config get_best_pll_config(int clk)
 {
-       u32 value = 0;
+       struct via_pll_config config;
 
        switch (viaparinfo->chip_info->gfx_chip_name) {
        case UNICHROME_CLE266:
        case UNICHROME_K400:
-               value = cle266_encode_pll(get_pll_config(cle266_pll_config,
-                       ARRAY_SIZE(cle266_pll_config), clk));
+               config = get_pll_config(cle266_pll_limits,
+                       ARRAY_SIZE(cle266_pll_limits), clk);
                break;
        case UNICHROME_K800:
        case UNICHROME_PM800:
        case UNICHROME_CN700:
-               value = k800_encode_pll(get_pll_config(k800_pll_config,
-                       ARRAY_SIZE(k800_pll_config), clk));
+               config = get_pll_config(k800_pll_limits,
+                       ARRAY_SIZE(k800_pll_limits), clk);
                break;
        case UNICHROME_CX700:
        case UNICHROME_CN750:
@@ -1671,92 +1501,28 @@ u32 viafb_get_clk_value(int clk)
        case UNICHROME_P4M890:
        case UNICHROME_P4M900:
        case UNICHROME_VX800:
-               value = k800_encode_pll(get_pll_config(cx700_pll_config,
-                       ARRAY_SIZE(cx700_pll_config), clk));
+               config = get_pll_config(cx700_pll_limits,
+                       ARRAY_SIZE(cx700_pll_limits), clk);
                break;
        case UNICHROME_VX855:
        case UNICHROME_VX900:
-               value = vx855_encode_pll(get_pll_config(vx855_pll_config,
-                       ARRAY_SIZE(vx855_pll_config), clk));
+               config = get_pll_config(vx855_pll_limits,
+                       ARRAY_SIZE(vx855_pll_limits), clk);
                break;
        }
 
-       return value;
+       return config;
 }
 
 /* Set VCLK*/
 void viafb_set_vclock(u32 clk, int set_iga)
 {
-       /* H.W. Reset : ON */
-       viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);
+       struct via_pll_config config = get_best_pll_config(clk);
 
-       if (set_iga == IGA1) {
-               /* Change D,N FOR VCLK */
-               switch (viaparinfo->chip_info->gfx_chip_name) {
-               case UNICHROME_CLE266:
-               case UNICHROME_K400:
-                       via_write_reg(VIASR, SR46, (clk & 0x00FF));
-                       via_write_reg(VIASR, SR47, (clk & 0xFF00) >> 8);
-                       break;
-
-               case UNICHROME_K800:
-               case UNICHROME_PM800:
-               case UNICHROME_CN700:
-               case UNICHROME_CX700:
-               case UNICHROME_CN750:
-               case UNICHROME_K8M890:
-               case UNICHROME_P4M890:
-               case UNICHROME_P4M900:
-               case UNICHROME_VX800:
-               case UNICHROME_VX855:
-               case UNICHROME_VX900:
-                       via_write_reg(VIASR, SR44, (clk & 0x0000FF));
-                       via_write_reg(VIASR, SR45, (clk & 0x00FF00) >> 8);
-                       via_write_reg(VIASR, SR46, (clk & 0xFF0000) >> 16);
-                       break;
-               }
-       }
-
-       if (set_iga == IGA2) {
-               /* Change D,N FOR LCK */
-               switch (viaparinfo->chip_info->gfx_chip_name) {
-               case UNICHROME_CLE266:
-               case UNICHROME_K400:
-                       via_write_reg(VIASR, SR44, (clk & 0x00FF));
-                       via_write_reg(VIASR, SR45, (clk & 0xFF00) >> 8);
-                       break;
-
-               case UNICHROME_K800:
-               case UNICHROME_PM800:
-               case UNICHROME_CN700:
-               case UNICHROME_CX700:
-               case UNICHROME_CN750:
-               case UNICHROME_K8M890:
-               case UNICHROME_P4M890:
-               case UNICHROME_P4M900:
-               case UNICHROME_VX800:
-               case UNICHROME_VX855:
-               case UNICHROME_VX900:
-                       via_write_reg(VIASR, SR4A, (clk & 0x0000FF));
-                       via_write_reg(VIASR, SR4B, (clk & 0x00FF00) >> 8);
-                       via_write_reg(VIASR, SR4C, (clk & 0xFF0000) >> 16);
-                       break;
-               }
-       }
-
-       /* H.W. Reset : OFF */
-       viafb_write_reg_mask(CR17, VIACR, 0x80, BIT7);
-
-       /* Reset PLL */
-       if (set_iga == IGA1) {
-               viafb_write_reg_mask(SR40, VIASR, 0x02, BIT1);
-               viafb_write_reg_mask(SR40, VIASR, 0x00, BIT1);
-       }
-
-       if (set_iga == IGA2) {
-               viafb_write_reg_mask(SR40, VIASR, 0x04, BIT2);
-               viafb_write_reg_mask(SR40, VIASR, 0x00, BIT2);
-       }
+       if (set_iga == IGA1)
+               clock.set_primary_pll(config);
+       if (set_iga == IGA2)
+               clock.set_secondary_pll(config);
 
        /* Fire! */
        via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
@@ -2002,7 +1768,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
        int i;
        int index = 0;
        int h_addr, v_addr;
-       u32 pll_D_N, clock, refresh = viafb_refresh;
+       u32 clock, refresh = viafb_refresh;
 
        if (viafb_SAMM_ON && set_iga == IGA2)
                refresh = viafb_refresh1;
@@ -2033,8 +1799,6 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
        v_addr = crt_reg.ver_addr;
        if (set_iga == IGA1) {
                viafb_unlock_crt();
-               viafb_write_reg(CR09, VIACR, 0x00);     /*initial CR09=0 */
-               viafb_write_reg_mask(CR11, VIACR, 0x00, BIT4 + BIT5 + BIT6);
                viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);
        }
 
@@ -2047,7 +1811,6 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
                break;
        }
 
-       load_fix_bit_crtc_reg();
        viafb_lock_crt();
        viafb_write_reg_mask(CR17, VIACR, 0x80, BIT7);
        viafb_load_fetch_count_reg(h_addr, bpp_byte, set_iga);
@@ -2059,20 +1822,17 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
 
        clock = crt_reg.hor_total * crt_reg.ver_total
                * crt_table[index].refresh_rate;
-       pll_D_N = viafb_get_clk_value(clock);
-       DEBUG_MSG(KERN_INFO "PLL=%x", pll_D_N);
-       viafb_set_vclock(pll_D_N, set_iga);
+       viafb_set_vclock(clock, set_iga);
 
 }
 
 void __devinit viafb_init_chip_info(int chip_type)
 {
+       via_clock_init(&clock, chip_type);
        init_gfx_chip_info(chip_type);
        init_tmds_chip_info();
        init_lvds_chip_info();
 
-       viaparinfo->crt_setting_info->iga_path = IGA1;
-
        /*Set IGA path for each device */
        viafb_set_iga_path();
 
@@ -2354,6 +2114,7 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
        outb(0x00, VIAAR);
 
        /* Write Common Setting for Video Mode */
+       viafb_write_regx(common_vga, ARRAY_SIZE(common_vga));
        switch (viaparinfo->chip_info->gfx_chip_name) {
        case UNICHROME_CLE266:
                viafb_write_regx(CLE266_ModeXregs, NUM_TOTAL_CLE266_ModeXregs);
@@ -2400,9 +2161,6 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
 
        viafb_write_reg_mask(0x15, VIASR, 0xA2, 0xA2);
 
-       /* Write CRTC */
-       viafb_fill_crtc_timing(crt_timing, vmode_tbl, video_bpp / 8, IGA1);
-
        /* Write Graphic Controller */
        for (i = 0; i < StdGR; i++)
                via_write_reg(VIAGR, i, VPIT.GR[i]);
@@ -2432,6 +2190,7 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
                }
        }
 
+       load_fix_bit_crtc_reg();
        via_set_primary_pitch(viafbinfo->fix.line_length);
        via_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
                : viafbinfo->fix.line_length);
@@ -2451,15 +2210,15 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
 
        /* CRT set mode */
        if (viafb_CRT_ON) {
-               if (viafb_SAMM_ON && (viaparinfo->crt_setting_info->iga_path ==
-                       IGA2)) {
+               if (viafb_SAMM_ON &&
+                       viaparinfo->shared->iga2_devices & VIA_CRT) {
                        viafb_fill_crtc_timing(crt_timing1, vmode_tbl1,
-                               video_bpp1 / 8,
-                               viaparinfo->crt_setting_info->iga_path);
+                               video_bpp1 / 8, IGA2);
                } else {
                        viafb_fill_crtc_timing(crt_timing, vmode_tbl,
                                video_bpp / 8,
-                               viaparinfo->crt_setting_info->iga_path);
+                               (viaparinfo->shared->iga1_devices & VIA_CRT)
+                               ? IGA1 : IGA2);
                }
 
                /* Patch if set_hres is not 8 alignment (1366) to viafb_setmode
@@ -2557,6 +2316,33 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
                        get_sync(viafbinfo1));
        }
 
+       clock.set_engine_pll_state(VIA_STATE_ON);
+       clock.set_primary_clock_source(VIA_CLKSRC_X1, true);
+       clock.set_secondary_clock_source(VIA_CLKSRC_X1, true);
+
+#ifdef CONFIG_FB_VIA_X_COMPATIBILITY
+       clock.set_primary_pll_state(VIA_STATE_ON);
+       clock.set_primary_clock_state(VIA_STATE_ON);
+       clock.set_secondary_pll_state(VIA_STATE_ON);
+       clock.set_secondary_clock_state(VIA_STATE_ON);
+#else
+       if (viaparinfo->shared->iga1_devices) {
+               clock.set_primary_pll_state(VIA_STATE_ON);
+               clock.set_primary_clock_state(VIA_STATE_ON);
+       } else {
+               clock.set_primary_pll_state(VIA_STATE_OFF);
+               clock.set_primary_clock_state(VIA_STATE_OFF);
+       }
+
+       if (viaparinfo->shared->iga2_devices) {
+               clock.set_secondary_pll_state(VIA_STATE_ON);
+               clock.set_secondary_clock_state(VIA_STATE_ON);
+       } else {
+               clock.set_secondary_pll_state(VIA_STATE_OFF);
+               clock.set_secondary_clock_state(VIA_STATE_OFF);
+       }
+#endif /*CONFIG_FB_VIA_X_COMPATIBILITY*/
+
        via_set_state(devices, VIA_STATE_ON);
        device_screen_on();
        return 1;
@@ -2598,8 +2384,12 @@ int viafb_get_refresh(int hres, int vres, u32 long_refresh)
                        best = &vmode->crtc[i];
        }
 
-       if (abs(best->refresh_rate - long_refresh) > 3)
-               return 60;
+       if (abs(best->refresh_rate - long_refresh) > 3) {
+               if (hres == 1200 && vres == 900)
+                       return 49; /* OLPC DCON only supports 50 Hz */
+               else
+                       return 60;
+       }
 
        return best->refresh_rate;
 }
index 8858593405aab122049c5d4a1365741287e7b98b..c7239eb83bae30aaad17e356b7a712d973751d50 100644 (file)
@@ -732,20 +732,13 @@ struct _lcd_scaling_factor {
        struct _lcd_ver_scaling_factor lcd_ver_scaling_factor;
 };
 
-struct pll_config {
-       u16 multiplier;
+struct pll_limit {
+       u16 multiplier_min;
+       u16 multiplier_max;
        u8 divisor;
        u8 rshift;
 };
 
-struct pll_map {
-       u32 clk;
-       struct pll_config cle266_pll;
-       struct pll_config k800_pll;
-       struct pll_config cx700_pll;
-       struct pll_config vx855_pll;
-};
-
 struct rgbLUT {
        u8 red;
        u8 green;
@@ -910,7 +903,6 @@ struct via_device_mapping {
        const char *name;
 };
 
-extern unsigned int viafb_second_virtual_xres;
 extern int viafb_SAMM_ON;
 extern int viafb_dual_fb;
 extern int viafb_LCD2_ON;
@@ -936,7 +928,6 @@ void viafb_lock_crt(void);
 void viafb_unlock_crt(void);
 void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga);
 void viafb_write_regx(struct io_reg RegTable[], int ItemNum);
-u32 viafb_get_clk_value(int clk);
 void viafb_load_FIFO_reg(int set_iga, int hor_active, int ver_active);
 void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
                                        *p_gfx_dpa_setting);
index 64bc7e76310395a4c2f316d6093290c9a22f04f0..6e06981d638be99f44c0b5ef9fc5ef2d90cfb883 100644 (file)
@@ -48,7 +48,6 @@ static struct _lcd_scaling_factor lcd_scaling_factor_CLE = {
        {LCD_VER_SCALING_FACTOR_REG_NUM_CLE, {{CR78, 0, 7}, {CR79, 6, 7} } }
 };
 
-static int check_lvds_chip(int device_id_subaddr, int device_id);
 static bool lvds_identify_integratedlvds(void);
 static void __devinit fp_id_to_vindex(int panel_id);
 static int lvds_register_read(int index);
@@ -84,12 +83,9 @@ static struct display_timing lcd_centering_timging(struct display_timing
                                            mode_crt_reg,
                                           struct display_timing panel_crt_reg);
 
-static int check_lvds_chip(int device_id_subaddr, int device_id)
+static inline bool check_lvds_chip(int device_id_subaddr, int device_id)
 {
-       if (lvds_register_read(device_id_subaddr) == device_id)
-               return OK;
-       else
-               return FAIL;
+       return lvds_register_read(device_id_subaddr) == device_id;
 }
 
 void __devinit viafb_init_lcd_size(void)
@@ -150,7 +146,7 @@ static bool lvds_identify_integratedlvds(void)
        return true;
 }
 
-int __devinit viafb_lvds_trasmitter_identify(void)
+bool __devinit viafb_lvds_trasmitter_identify(void)
 {
        if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
                viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
@@ -175,20 +171,20 @@ int __devinit viafb_lvds_trasmitter_identify(void)
        viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr =
                VT1631_LVDS_I2C_ADDR;
 
-       if (check_lvds_chip(VT1631_DEVICE_ID_REG, VT1631_DEVICE_ID) != FAIL) {
+       if (check_lvds_chip(VT1631_DEVICE_ID_REG, VT1631_DEVICE_ID)) {
                DEBUG_MSG(KERN_INFO "\n VT1631 LVDS ! \n");
                DEBUG_MSG(KERN_INFO "\n %2d",
                          viaparinfo->chip_info->lvds_chip_info.lvds_chip_name);
                DEBUG_MSG(KERN_INFO "\n %2d",
                          viaparinfo->chip_info->lvds_chip_info.lvds_chip_name);
-               return OK;
+               return true;
        }
 
        viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
                NON_LVDS_TRANSMITTER;
        viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr =
                VT1631_LVDS_I2C_ADDR;
-       return FAIL;
+       return false;
 }
 
 static void __devinit fp_id_to_vindex(int panel_id)
@@ -562,7 +558,7 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
        int set_vres = plvds_setting_info->v_active;
        int panel_hres = plvds_setting_info->lcd_panel_hres;
        int panel_vres = plvds_setting_info->lcd_panel_vres;
-       u32 pll_D_N, clock;
+       u32 clock;
        struct display_timing mode_crt_reg, panel_crt_reg;
        struct crt_mode_table *panel_crt_table = NULL;
        struct VideoModeTable *vmode_tbl = viafb_get_mode(panel_hres,
@@ -613,10 +609,7 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
                viafb_load_FIFO_reg(set_iga, set_hres, set_vres);
 
        fill_lcd_format();
-
-       pll_D_N = viafb_get_clk_value(clock);
-       DEBUG_MSG(KERN_INFO "PLL=0x%x", pll_D_N);
-       viafb_set_vclock(pll_D_N, set_iga);
+       viafb_set_vclock(clock, set_iga);
        lcd_patch_skew(plvds_setting_info, plvds_chip_info);
 
        /* If K8M800, enable LCD Prefetch Mode. */
index c7909fe29550a7bd8846c51244eb48743d3cd28a..75f60a655b0e09c334a77d8418a656edefb8c467 100644 (file)
@@ -79,7 +79,7 @@ void __devinit viafb_init_lvds_output_interface(struct lvds_chip_information
 void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
                  struct lvds_setting_information *plvds_setting_info,
                  struct lvds_chip_information *plvds_chip_info);
-int __devinit viafb_lvds_trasmitter_identify(void);
+bool __devinit viafb_lvds_trasmitter_identify(void);
 void viafb_init_lvds_output_interface(struct lvds_chip_information
                                *plvds_chip_info,
                                struct lvds_setting_information
index 4b7831f0d012d51be5cf18281c32104206e60257..61b0bd596b85db28553cbae06bcdeb8f6d4128ca 100644 (file)
 #ifndef __SHARE_H__
 #define __SHARE_H__
 
-/* Define Return Value */
-#define FAIL        -1
-#define OK          1
-
-#ifndef NULL
-#define NULL 0
-#endif
-
 /* Define Bit Field */
 #define BIT0    0x01
 #define BIT1    0x02
 #define HW_LAYOUT_LCD_EXTERNAL_LCD2 0x10
 
 /* Definition Refresh Rate */
+#define REFRESH_49      49
 #define REFRESH_50      50
 #define REFRESH_60      60
 #define REFRESH_75      75
 #define M1280X720_R50_HSP       NEGATIVE
 #define M1280X720_R50_VSP       POSITIVE
 
-/* 1280x720@60 Sync Polarity  (CEA Mode) */
-#define M1280X720_CEA_R60_HSP       POSITIVE
-#define M1280X720_CEA_R60_VSP       POSITIVE
-
 /* 1440x900@60 Sync Polarity (CVT Mode) */
 #define M1440X900_R60_HSP       NEGATIVE
 #define M1440X900_R60_VSP       POSITIVE
 #define M1920X1200_RB_R60_HSP  POSITIVE
 #define M1920X1200_RB_R60_VSP  NEGATIVE
 
-/* 1920x1080@60 Sync Polarity  (CEA Mode) */
-#define M1920X1080_CEA_R60_HSP       POSITIVE
-#define M1920X1080_CEA_R60_VSP       POSITIVE
-
 /* 2048x1536@60 Sync Polarity (CVT Mode) */
 #define M2048x1536_R60_HSP      NEGATIVE
 #define M2048x1536_R60_VSP      POSITIVE
index 6723d6910cde7831175df8932f9adbc5c45f9c76..eb112b62173565984c28e68b400b40f798b1877e 100644 (file)
@@ -505,7 +505,14 @@ static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
        ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
        if (ret < 0)
                goto out_unmap;
-       vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
+
+       /* try to map less memory on failure, 8 MB should be still enough */
+       for (; vdev->fbmem_len >= 8 << 20; vdev->fbmem_len /= 2) {
+               vdev->fbmem = ioremap_wc(vdev->fbmem_start, vdev->fbmem_len);
+               if (vdev->fbmem)
+                       break;
+       }
+
        if (vdev->fbmem == NULL) {
                ret = -ENOMEM;
                goto out_unmap;
diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
new file mode 100644 (file)
index 0000000..af8f26b
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE.See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * clock and PLL management functions
+ */
+
+#include <linux/kernel.h>
+#include <linux/via-core.h>
+#include "via_clock.h"
+#include "global.h"
+#include "debug.h"
+
+const char *via_slap = "Please slap VIA Technologies to motivate them "
+       "releasing full documentation for your platform!\n";
+
+static inline u32 cle266_encode_pll(struct via_pll_config pll)
+{
+       return (pll.multiplier << 8)
+               | (pll.rshift << 6)
+               | pll.divisor;
+}
+
+static inline u32 k800_encode_pll(struct via_pll_config pll)
+{
+       return ((pll.divisor - 2) << 16)
+               | (pll.rshift << 10)
+               | (pll.multiplier - 2);
+}
+
+static inline u32 vx855_encode_pll(struct via_pll_config pll)
+{
+       return (pll.divisor << 16)
+               | (pll.rshift << 10)
+               | pll.multiplier;
+}
+
+static inline void cle266_set_primary_pll_encoded(u32 data)
+{
+       via_write_reg_mask(VIASR, 0x40, 0x02, 0x02); /* enable reset */
+       via_write_reg(VIASR, 0x46, data & 0xFF);
+       via_write_reg(VIASR, 0x47, (data >> 8) & 0xFF);
+       via_write_reg_mask(VIASR, 0x40, 0x00, 0x02); /* disable reset */
+}
+
+static inline void k800_set_primary_pll_encoded(u32 data)
+{
+       via_write_reg_mask(VIASR, 0x40, 0x02, 0x02); /* enable reset */
+       via_write_reg(VIASR, 0x44, data & 0xFF);
+       via_write_reg(VIASR, 0x45, (data >> 8) & 0xFF);
+       via_write_reg(VIASR, 0x46, (data >> 16) & 0xFF);
+       via_write_reg_mask(VIASR, 0x40, 0x00, 0x02); /* disable reset */
+}
+
+static inline void cle266_set_secondary_pll_encoded(u32 data)
+{
+       via_write_reg_mask(VIASR, 0x40, 0x04, 0x04); /* enable reset */
+       via_write_reg(VIASR, 0x44, data & 0xFF);
+       via_write_reg(VIASR, 0x45, (data >> 8) & 0xFF);
+       via_write_reg_mask(VIASR, 0x40, 0x00, 0x04); /* disable reset */
+}
+
+static inline void k800_set_secondary_pll_encoded(u32 data)
+{
+       via_write_reg_mask(VIASR, 0x40, 0x04, 0x04); /* enable reset */
+       via_write_reg(VIASR, 0x4A, data & 0xFF);
+       via_write_reg(VIASR, 0x4B, (data >> 8) & 0xFF);
+       via_write_reg(VIASR, 0x4C, (data >> 16) & 0xFF);
+       via_write_reg_mask(VIASR, 0x40, 0x00, 0x04); /* disable reset */
+}
+
+static inline void set_engine_pll_encoded(u32 data)
+{
+       via_write_reg_mask(VIASR, 0x40, 0x01, 0x01); /* enable reset */
+       via_write_reg(VIASR, 0x47, data & 0xFF);
+       via_write_reg(VIASR, 0x48, (data >> 8) & 0xFF);
+       via_write_reg(VIASR, 0x49, (data >> 16) & 0xFF);
+       via_write_reg_mask(VIASR, 0x40, 0x00, 0x01); /* disable reset */
+}
+
+static void cle266_set_primary_pll(struct via_pll_config config)
+{
+       cle266_set_primary_pll_encoded(cle266_encode_pll(config));
+}
+
+static void k800_set_primary_pll(struct via_pll_config config)
+{
+       k800_set_primary_pll_encoded(k800_encode_pll(config));
+}
+
+static void vx855_set_primary_pll(struct via_pll_config config)
+{
+       k800_set_primary_pll_encoded(vx855_encode_pll(config));
+}
+
+static void cle266_set_secondary_pll(struct via_pll_config config)
+{
+       cle266_set_secondary_pll_encoded(cle266_encode_pll(config));
+}
+
+static void k800_set_secondary_pll(struct via_pll_config config)
+{
+       k800_set_secondary_pll_encoded(k800_encode_pll(config));
+}
+
+static void vx855_set_secondary_pll(struct via_pll_config config)
+{
+       k800_set_secondary_pll_encoded(vx855_encode_pll(config));
+}
+
+static void k800_set_engine_pll(struct via_pll_config config)
+{
+       set_engine_pll_encoded(k800_encode_pll(config));
+}
+
+static void vx855_set_engine_pll(struct via_pll_config config)
+{
+       set_engine_pll_encoded(vx855_encode_pll(config));
+}
+
+static void set_primary_pll_state(u8 state)
+{
+       u8 value;
+
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x20;
+               break;
+       case VIA_STATE_OFF:
+               value = 0x00;
+               break;
+       default:
+               return;
+       }
+
+       via_write_reg_mask(VIASR, 0x2D, value, 0x30);
+}
+
+static void set_secondary_pll_state(u8 state)
+{
+       u8 value;
+
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x08;
+               break;
+       case VIA_STATE_OFF:
+               value = 0x00;
+               break;
+       default:
+               return;
+       }
+
+       via_write_reg_mask(VIASR, 0x2D, value, 0x0C);
+}
+
+static void set_engine_pll_state(u8 state)
+{
+       u8 value;
+
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x02;
+               break;
+       case VIA_STATE_OFF:
+               value = 0x00;
+               break;
+       default:
+               return;
+       }
+
+       via_write_reg_mask(VIASR, 0x2D, value, 0x03);
+}
+
+static void set_primary_clock_state(u8 state)
+{
+       u8 value;
+
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x20;
+               break;
+       case VIA_STATE_OFF:
+               value = 0x00;
+               break;
+       default:
+               return;
+       }
+
+       via_write_reg_mask(VIASR, 0x1B, value, 0x30);
+}
+
+static void set_secondary_clock_state(u8 state)
+{
+       u8 value;
+
+       switch (state) {
+       case VIA_STATE_ON:
+               value = 0x80;
+               break;
+       case VIA_STATE_OFF:
+               value = 0x00;
+               break;
+       default:
+               return;
+       }
+
+       via_write_reg_mask(VIASR, 0x1B, value, 0xC0);
+}
+
+static inline u8 set_clock_source_common(enum via_clksrc source, bool use_pll)
+{
+       u8 data = 0;
+
+       switch (source) {
+       case VIA_CLKSRC_X1:
+               data = 0x00;
+               break;
+       case VIA_CLKSRC_TVX1:
+               data = 0x02;
+               break;
+       case VIA_CLKSRC_TVPLL:
+               data = 0x04; /* 0x06 should be the same */
+               break;
+       case VIA_CLKSRC_DVP1TVCLKR:
+               data = 0x0A;
+               break;
+       case VIA_CLKSRC_CAP0:
+               data = 0xC;
+               break;
+       case VIA_CLKSRC_CAP1:
+               data = 0x0E;
+               break;
+       }
+
+       if (!use_pll)
+               data |= 1;
+
+       return data;
+}
+
+static void set_primary_clock_source(enum via_clksrc source, bool use_pll)
+{
+       u8 data = set_clock_source_common(source, use_pll) << 4;
+       via_write_reg_mask(VIACR, 0x6C, data, 0xF0);
+}
+
+static void set_secondary_clock_source(enum via_clksrc source, bool use_pll)
+{
+       u8 data = set_clock_source_common(source, use_pll);
+       via_write_reg_mask(VIACR, 0x6C, data, 0x0F);
+}
+
+static void dummy_set_clock_state(u8 state)
+{
+       printk(KERN_INFO "Using undocumented set clock state.\n%s", via_slap);
+}
+
+static void dummy_set_clock_source(enum via_clksrc source, bool use_pll)
+{
+       printk(KERN_INFO "Using undocumented set clock source.\n%s", via_slap);
+}
+
+static void dummy_set_pll_state(u8 state)
+{
+       printk(KERN_INFO "Using undocumented set PLL state.\n%s", via_slap);
+}
+
+static void dummy_set_pll(struct via_pll_config config)
+{
+       printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
+}
+
+void via_clock_init(struct via_clock *clock, int gfx_chip)
+{
+       switch (gfx_chip) {
+       case UNICHROME_CLE266:
+       case UNICHROME_K400:
+               clock->set_primary_clock_state = dummy_set_clock_state;
+               clock->set_primary_clock_source = dummy_set_clock_source;
+               clock->set_primary_pll_state = dummy_set_pll_state;
+               clock->set_primary_pll = cle266_set_primary_pll;
+
+               clock->set_secondary_clock_state = dummy_set_clock_state;
+               clock->set_secondary_clock_source = dummy_set_clock_source;
+               clock->set_secondary_pll_state = dummy_set_pll_state;
+               clock->set_secondary_pll = cle266_set_secondary_pll;
+
+               clock->set_engine_pll_state = dummy_set_pll_state;
+               clock->set_engine_pll = dummy_set_pll;
+               break;
+       case UNICHROME_K800:
+       case UNICHROME_PM800:
+       case UNICHROME_CN700:
+       case UNICHROME_CX700:
+       case UNICHROME_CN750:
+       case UNICHROME_K8M890:
+       case UNICHROME_P4M890:
+       case UNICHROME_P4M900:
+       case UNICHROME_VX800:
+               clock->set_primary_clock_state = set_primary_clock_state;
+               clock->set_primary_clock_source = set_primary_clock_source;
+               clock->set_primary_pll_state = set_primary_pll_state;
+               clock->set_primary_pll = k800_set_primary_pll;
+
+               clock->set_secondary_clock_state = set_secondary_clock_state;
+               clock->set_secondary_clock_source = set_secondary_clock_source;
+               clock->set_secondary_pll_state = set_secondary_pll_state;
+               clock->set_secondary_pll = k800_set_secondary_pll;
+
+               clock->set_engine_pll_state = set_engine_pll_state;
+               clock->set_engine_pll = k800_set_engine_pll;
+               break;
+       case UNICHROME_VX855:
+       case UNICHROME_VX900:
+               clock->set_primary_clock_state = set_primary_clock_state;
+               clock->set_primary_clock_source = set_primary_clock_source;
+               clock->set_primary_pll_state = set_primary_pll_state;
+               clock->set_primary_pll = vx855_set_primary_pll;
+
+               clock->set_secondary_clock_state = set_secondary_clock_state;
+               clock->set_secondary_clock_source = set_secondary_clock_source;
+               clock->set_secondary_pll_state = set_secondary_pll_state;
+               clock->set_secondary_pll = vx855_set_secondary_pll;
+
+               clock->set_engine_pll_state = set_engine_pll_state;
+               clock->set_engine_pll = vx855_set_engine_pll;
+               break;
+
+       }
+}
diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
new file mode 100644 (file)
index 0000000..88714ae
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE.See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * clock and PLL management functions
+ */
+
+#ifndef __VIA_CLOCK_H__
+#define __VIA_CLOCK_H__
+
+#include <linux/types.h>
+
+enum via_clksrc {
+       VIA_CLKSRC_X1 = 0,
+       VIA_CLKSRC_TVX1,
+       VIA_CLKSRC_TVPLL,
+       VIA_CLKSRC_DVP1TVCLKR,
+       VIA_CLKSRC_CAP0,
+       VIA_CLKSRC_CAP1,
+};
+
+struct via_pll_config {
+       u16 multiplier;
+       u8 divisor;
+       u8 rshift;
+};
+
+struct via_clock {
+       void (*set_primary_clock_state)(u8 state);
+       void (*set_primary_clock_source)(enum via_clksrc src, bool use_pll);
+       void (*set_primary_pll_state)(u8 state);
+       void (*set_primary_pll)(struct via_pll_config config);
+
+       void (*set_secondary_clock_state)(u8 state);
+       void (*set_secondary_clock_source)(enum via_clksrc src, bool use_pll);
+       void (*set_secondary_pll_state)(u8 state);
+       void (*set_secondary_pll)(struct via_pll_config config);
+
+       void (*set_engine_pll_state)(u8 state);
+       void (*set_engine_pll)(struct via_pll_config config);
+};
+
+
+static inline u32 get_pll_internal_frequency(u32 ref_freq,
+       struct via_pll_config pll)
+{
+       return ref_freq / pll.divisor * pll.multiplier;
+}
+
+static inline u32 get_pll_output_frequency(u32 ref_freq,
+       struct via_pll_config pll)
+{
+       return get_pll_internal_frequency(ref_freq, pll) >> pll.rshift;
+}
+
+void via_clock_init(struct via_clock *clock, int gfx_chip);
+
+#endif /* __VIA_CLOCK_H__ */
index a542bed086e24900af1ee2f85ab670a2015ef4ff..cf43c80d27f6e47659d3add9aac2c22c2bc02e36 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/stat.h>
 #include <linux/via-core.h>
+#include <asm/olpc.h>
 
 #define _MASTER_FILE
 #include "global.h"
@@ -37,6 +38,8 @@ static char *viafb_mode1;
 static int viafb_bpp = 32;
 static int viafb_bpp1 = 32;
 
+static unsigned int viafb_second_xres = 640;
+static unsigned int viafb_second_yres = 480;
 static unsigned int viafb_second_offset;
 static int viafb_second_size;
 
@@ -440,8 +443,8 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
                if (viafb_SAMM_ON == 1) {
                        u.viamode.xres_sec = viafb_second_xres;
                        u.viamode.yres_sec = viafb_second_yres;
-                       u.viamode.virtual_xres_sec = viafb_second_virtual_xres;
-                       u.viamode.virtual_yres_sec = viafb_second_virtual_yres;
+                       u.viamode.virtual_xres_sec = viafb_dual_fb ? viafbinfo1->var.xres_virtual : viafbinfo->var.xres_virtual;
+                       u.viamode.virtual_yres_sec = viafb_dual_fb ? viafbinfo1->var.yres_virtual : viafbinfo->var.yres_virtual;
                        u.viamode.refresh_sec = viafb_refresh1;
                        u.viamode.bpp_sec = viafb_bpp1;
                } else {
@@ -930,10 +933,8 @@ static int get_primary_device(void)
        /* Rule: device on iga1 path are the primary device. */
        if (viafb_SAMM_ON) {
                if (viafb_CRT_ON) {
-                       if (viaparinfo->crt_setting_info->iga_path == IGA1) {
-                               DEBUG_MSG(KERN_INFO "CRT IGA Path:%d\n",
-                                       viaparinfo->
-                                       crt_setting_info->iga_path);
+                       if (viaparinfo->shared->iga1_devices & VIA_CRT) {
+                               DEBUG_MSG(KERN_INFO "CRT IGA Path:%d\n", IGA1);
                                primary_device = CRT_Device;
                        }
                }
@@ -1011,8 +1012,13 @@ static int __init parse_active_dev(void)
        /*    Note: The previous of active_dev is primary device,
           and the following is secondary device. */
        if (!viafb_active_dev) {
-               viafb_CRT_ON = STATE_ON;
-               viafb_SAMM_ON = STATE_OFF;
+               if (machine_is_olpc()) { /* LCD only */
+                       viafb_LCD_ON = STATE_ON;
+                       viafb_SAMM_ON = STATE_OFF;
+               } else {
+                       viafb_CRT_ON = STATE_ON;
+                       viafb_SAMM_ON = STATE_OFF;
+               }
        } else if (!strcmp(viafb_active_dev, "CRT+DVI")) {
                /* CRT+DVI */
                viafb_CRT_ON = STATE_ON;
@@ -1665,8 +1671,13 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
        char *ptr;
 
        if (!str) {
-               *xres = 640;
-               *yres = 480;
+               if (machine_is_olpc()) {
+                       *xres = 1200;
+                       *yres = 900;
+               } else {
+                       *xres = 640;
+                       *yres = 480;
+               }
                return 0;
        }
 
@@ -1746,7 +1757,6 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
        viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info;
        viaparinfo->lvds_setting_info2 =
                &viaparinfo->shared->lvds_setting_info2;
-       viaparinfo->crt_setting_info = &viaparinfo->shared->crt_setting_info;
        viaparinfo->chip_info = &viaparinfo->shared->chip_info;
 
        if (viafb_dual_fb)
@@ -1793,14 +1803,10 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
 
        parse_mode(viafb_mode, &default_xres, &default_yres);
        vmode_entry = viafb_get_mode(default_xres, default_yres);
-       if (viafb_SAMM_ON == 1) {
+       if (viafb_SAMM_ON == 1)
                parse_mode(viafb_mode1, &viafb_second_xres,
                        &viafb_second_yres);
 
-               viafb_second_virtual_xres = viafb_second_xres;
-               viafb_second_virtual_yres = viafb_second_yres;
-       }
-
        default_var.xres = default_xres;
        default_var.yres = default_yres;
        default_var.xres_virtual = default_xres;
@@ -1844,8 +1850,8 @@ int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
 
                default_var.xres = viafb_second_xres;
                default_var.yres = viafb_second_yres;
-               default_var.xres_virtual = viafb_second_virtual_xres;
-               default_var.yres_virtual = viafb_second_virtual_yres;
+               default_var.xres_virtual = viafb_second_xres;
+               default_var.yres_virtual = viafb_second_yres;
                default_var.bits_per_pixel = viafb_bpp1;
                viafb_fill_var_timing_info(&default_var, viafb_get_refresh(
                        default_var.xres, default_var.yres, viafb_refresh1),
@@ -1927,11 +1933,16 @@ void __devexit via_fb_pci_remove(struct pci_dev *pdev)
 }
 
 #ifndef MODULE
-static int __init viafb_setup(char *options)
+static int __init viafb_setup(void)
 {
        char *this_opt;
+       char *options;
+
        DEBUG_MSG(KERN_INFO "viafb_setup!\n");
 
+       if (fb_get_options("viafb", &options))
+               return -ENODEV;
+
        if (!options || !*options)
                return 0;
 
@@ -2005,11 +2016,16 @@ static int __init viafb_setup(char *options)
 int __init viafb_init(void)
 {
        u32 dummy_x, dummy_y;
+       int r;
+
+       if (machine_is_olpc())
+               /* Apply XO-1.5-specific configuration. */
+               viafb_lcd_panel_id = 23;
+
 #ifndef MODULE
-       char *option = NULL;
-       if (fb_get_options("viafb", &option))
-               return -ENODEV;
-       viafb_setup(option);
+       r = viafb_setup();
+       if (r < 0)
+               return r;
 #endif
        if (parse_mode(viafb_mode, &dummy_x, &dummy_y)
                || !viafb_get_mode(dummy_x, dummy_y)
index 137996dc547e98ffd7c12470a2bdfa2c296b02d7..d9440635d1d4bdc8683c63558c0cd028d6a08ef1 100644 (file)
@@ -50,7 +50,6 @@ struct viafb_shared {
 
        /* All the information will be needed to set engine */
        struct tmds_setting_information tmds_setting_info;
-       struct crt_setting_information crt_setting_info;
        struct lvds_setting_information lvds_setting_info;
        struct lvds_setting_information lvds_setting_info2;
        struct chip_information chip_info;
@@ -79,14 +78,11 @@ struct viafb_par {
        /* All the information will be needed to set engine */
        /* depreciated, use the ones in shared directly */
        struct tmds_setting_information *tmds_setting_info;
-       struct crt_setting_information *crt_setting_info;
        struct lvds_setting_information *lvds_setting_info;
        struct lvds_setting_information *lvds_setting_info2;
        struct chip_information *chip_info;
 };
 
-extern unsigned int viafb_second_virtual_yres;
-extern unsigned int viafb_second_virtual_xres;
 extern int viafb_SAMM_ON;
 extern int viafb_dual_fb;
 extern int viafb_LCD2_ON;
index 8c5bc41ff6a485a6a6dbc7ef0aa9933353bcf091..58df74e1417e0a744ddd2f822accf4647cfd48ba 100644 (file)
@@ -30,10 +30,6 @@ struct io_reg CN400_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
 {VIASR, SR1A, 0xFB, 0x08},
 {VIASR, SR1E, 0x0F, 0x01},
 {VIASR, SR2A, 0xFF, 0x00},
-{VIACR, CR0A, 0xFF, 0x1E},     /* Cursor Start                        */
-{VIACR, CR0B, 0xFF, 0x00},     /* Cursor End                          */
-{VIACR, CR0E, 0xFF, 0x00},     /* Cursor Location High                */
-{VIACR, CR0F, 0xFF, 0x00},     /* Cursor Localtion Low                */
 {VIACR, CR32, 0xFF, 0x00},
 {VIACR, CR33, 0xFF, 0x00},
 {VIACR, CR35, 0xFF, 0x00},
@@ -41,7 +37,6 @@ struct io_reg CN400_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
 {VIACR, CR69, 0xFF, 0x00},
 {VIACR, CR6A, 0xFF, 0x40},
 {VIACR, CR6B, 0xFF, 0x00},
-{VIACR, CR6C, 0xFF, 0x00},
 {VIACR, CR88, 0xFF, 0x40},     /* LCD Panel Type                      */
 {VIACR, CR89, 0xFF, 0x00},     /* LCD Timing Control 0                */
 {VIACR, CR8A, 0xFF, 0x88},     /* LCD Timing Control 1                */
@@ -87,7 +82,6 @@ struct io_reg CN700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
 {VIACR, CR69, 0xFF, 0x00},
 {VIACR, CR6A, 0xFD, 0x40},
 {VIACR, CR6B, 0xFF, 0x00},
-{VIACR, CR6C, 0xFF, 0x00},
 {VIACR, CR77, 0xFF, 0x00},     /* LCD scaling Factor */
 {VIACR, CR78, 0xFF, 0x00},     /* LCD scaling Factor */
 {VIACR, CR79, 0xFF, 0x00},     /* LCD scaling Factor */
@@ -125,10 +119,6 @@ struct io_reg KM400_ModeXregs[] = {
        {VIASR, SR2A, 0xFF, 0x00},      /* Power Management Control 5      */
        {VIASR, SR2D, 0xFF, 0xFF},      /* Power Management Control 1      */
        {VIASR, SR2E, 0xFF, 0xFF},      /* Power Management Control 2      */
-       {VIACR, CR0A, 0xFF, 0x1E},      /* Cursor Start                    */
-       {VIACR, CR0B, 0xFF, 0x00},      /* Cursor End                      */
-       {VIACR, CR0E, 0xFF, 0x00},      /* Cursor Location High            */
-       {VIACR, CR0F, 0xFF, 0x00},      /* Cursor Localtion Low            */
        {VIACR, CR33, 0xFF, 0x00},
        {VIACR, CR55, 0x80, 0x00},
        {VIACR, CR5D, 0x80, 0x00},
@@ -161,11 +151,7 @@ struct io_reg CX700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
 {VIASR, SR1B, 0xFF, 0xF0},
 {VIASR, SR1E, 0xFF, 0x01},
 {VIASR, SR2A, 0xFF, 0x00},
-{VIASR, SR2D, 0xFF, 0xFF},     /* VCK and LCK PLL power on.           */
-{VIACR, CR0A, 0xFF, 0x1E},     /* Cursor Start                        */
-{VIACR, CR0B, 0xFF, 0x00},     /* Cursor End                          */
-{VIACR, CR0E, 0xFF, 0x00},     /* Cursor Location High                */
-{VIACR, CR0F, 0xFF, 0x00},     /* Cursor Localtion Low                */
+{VIASR, SR2D, 0xC0, 0xC0},     /* delayed E3_ECK */
 {VIACR, CR32, 0xFF, 0x00},
 {VIACR, CR33, 0xFF, 0x00},
 {VIACR, CR35, 0xFF, 0x00},
@@ -174,7 +160,6 @@ struct io_reg CX700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
 {VIACR, CR69, 0xFF, 0x00},
 {VIACR, CR6A, 0xFF, 0x40},
 {VIACR, CR6B, 0xFF, 0x00},
-{VIACR, CR6C, 0xFF, 0x00},
 {VIACR, CR88, 0xFF, 0x40},     /* LCD Panel Type                      */
 {VIACR, CR89, 0xFF, 0x00},     /* LCD Timing Control 0                */
 {VIACR, CR8A, 0xFF, 0x88},     /* LCD Timing Control 1                */
@@ -204,14 +189,7 @@ struct io_reg VX855_ModeXregs[] = {
 {VIASR, SR2A, 0xF0, 0x00},
 {VIASR, SR58, 0xFF, 0x00},
 {VIASR, SR59, 0xFF, 0x00},
-{VIASR, SR2D, 0xFF, 0xFF},     /* VCK and LCK PLL power on.           */
-{VIACR, CR09, 0xFF, 0x00},     /* Initial CR09=0*/
-{VIACR, CR11, 0x8F, 0x00},     /* IGA1 initial  Vertical end       */
-{VIACR, CR17, 0x7F, 0x00},     /* IGA1 CRT Mode control init   */
-{VIACR, CR0A, 0xFF, 0x1E},     /* Cursor Start                        */
-{VIACR, CR0B, 0xFF, 0x00},     /* Cursor End                          */
-{VIACR, CR0E, 0xFF, 0x00},     /* Cursor Location High                */
-{VIACR, CR0F, 0xFF, 0x00},     /* Cursor Localtion Low                */
+{VIASR, SR2D, 0xC0, 0xC0},     /* delayed E3_ECK */
 {VIACR, CR32, 0xFF, 0x00},
 {VIACR, CR33, 0x7F, 0x00},
 {VIACR, CR35, 0xFF, 0x00},
@@ -219,7 +197,6 @@ struct io_reg VX855_ModeXregs[] = {
 {VIACR, CR69, 0xFF, 0x00},
 {VIACR, CR6A, 0xFD, 0x60},
 {VIACR, CR6B, 0xFF, 0x00},
-{VIACR, CR6C, 0xFF, 0x00},
 {VIACR, CR88, 0xFF, 0x40},          /* LCD Panel Type                      */
 {VIACR, CR89, 0xFF, 0x00},          /* LCD Timing Control 0                */
 {VIACR, CR8A, 0xFF, 0x88},          /* LCD Timing Control 1                */
@@ -606,7 +583,7 @@ static struct crt_mode_table CRTM1200x720[] = {
 /* 1200x900 (DCON) */
 static struct crt_mode_table DCON1200x900[] = {
        /* r_rate,               hsp,               vsp   */
-       {REFRESH_60, M1200X900_R60_HSP, M1200X900_R60_VSP,
+       {REFRESH_49, M1200X900_R60_HSP, M1200X900_R60_VSP,
        /* The correct htotal is 1240, but this doesn't raster on VX855. */
        /* Via suggested changing to a multiple of 16, hence 1264.       */
        /*  HT,   HA,  HBS, HBE,  HSS, HSE,  VT,  VA, VBS, VBE, VSS, VSE */
@@ -877,23 +854,6 @@ static struct VideoModeTable viafb_rb_modes[] = {
        {CRTM1920x1200_RB, ARRAY_SIZE(CRTM1920x1200_RB)}
 };
 
-struct crt_mode_table CEAM1280x720[] = {
-       {REFRESH_60, M1280X720_CEA_R60_HSP, M1280X720_CEA_R60_VSP,
-        /* HT,    HA,   HBS,  HBE,  HSS, HSE,  VT,   VA,  VBS, VBE, VSS, VSE */
-        {1650, 1280, 1280, 370, 1390, 40, 750, 720, 720, 30, 725, 5} }
-};
-struct crt_mode_table CEAM1920x1080[] = {
-       {REFRESH_60, M1920X1080_CEA_R60_HSP, M1920X1080_CEA_R60_VSP,
-        /* HT,    HA,   HBS,  HBE,  HSS, HSE,  VT,  VA, VBS, VBE,  VSS, VSE */
-        {2200, 1920, 1920, 300, 2008, 44, 1125, 1080, 1080, 45, 1084, 5} }
-};
-struct VideoModeTable CEA_HDMI_Modes[] = {
-       /* Display : 1280x720 */
-       {CEAM1280x720, ARRAY_SIZE(CEAM1280x720)},
-       {CEAM1920x1080, ARRAY_SIZE(CEAM1920x1080)}
-};
-
-int NUM_TOTAL_CEA_MODES = ARRAY_SIZE(CEA_HDMI_Modes);
 int NUM_TOTAL_CN400_ModeXregs = ARRAY_SIZE(CN400_ModeXregs);
 int NUM_TOTAL_CN700_ModeXregs = ARRAY_SIZE(CN700_ModeXregs);
 int NUM_TOTAL_KM400_ModeXregs = ARRAY_SIZE(KM400_ModeXregs);
index 8a67ea1b5ef03add2468080f772ebf880313901c..3751289eb4506a1faa2a87a187dd6737ce37c598 100644 (file)
@@ -41,7 +41,6 @@ struct patch_table {
        struct io_reg *io_reg_table;
 };
 
-extern int NUM_TOTAL_CEA_MODES;
 extern int NUM_TOTAL_CN400_ModeXregs;
 extern int NUM_TOTAL_CN700_ModeXregs;
 extern int NUM_TOTAL_KM400_ModeXregs;
@@ -50,14 +49,6 @@ extern int NUM_TOTAL_VX855_ModeXregs;
 extern int NUM_TOTAL_CLE266_ModeXregs;
 extern int NUM_TOTAL_PATCH_MODE;
 
-/********************/
-/* Mode Table       */
-/********************/
-
-extern struct crt_mode_table CEAM1280x720[];
-extern struct crt_mode_table CEAM1920x1080[];
-extern struct VideoModeTable CEA_HDMI_Modes[];
-
 extern struct io_reg CN400_ModeXregs[];
 extern struct io_reg CN700_ModeXregs[];
 extern struct io_reg KM400_ModeXregs[];
index 33c963d08ab49ecc4244a5e817db4af525794ce5..05f4fa521325b071f7ad114354b30233b131ae48 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mount.h>
 #include <linux/async.h>
 #include <linux/posix_acl.h>
+#include <linux/prefetch.h>
 #include <linux/ima.h>
 #include <linux/cred.h>
 #include "internal.h"
index f7684483785e848c1e59657ab141801100796012..eed4d7b262491ae8e48ee401f81f38e25f40ddb1 100644 (file)
@@ -489,8 +489,8 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
 void nilfs_palloc_commit_alloc_entry(struct inode *inode,
                                     struct nilfs_palloc_req *req)
 {
-       nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
-       nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
+       mark_buffer_dirty(req->pr_bitmap_bh);
+       mark_buffer_dirty(req->pr_desc_bh);
        nilfs_mdt_mark_dirty(inode);
 
        brelse(req->pr_bitmap_bh);
@@ -527,8 +527,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
        kunmap(req->pr_bitmap_bh->b_page);
        kunmap(req->pr_desc_bh->b_page);
 
-       nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
-       nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
+       mark_buffer_dirty(req->pr_desc_bh);
+       mark_buffer_dirty(req->pr_bitmap_bh);
        nilfs_mdt_mark_dirty(inode);
 
        brelse(req->pr_bitmap_bh);
@@ -683,8 +683,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                kunmap(bitmap_bh->b_page);
                kunmap(desc_bh->b_page);
 
-               nilfs_mdt_mark_buffer_dirty(desc_bh);
-               nilfs_mdt_mark_buffer_dirty(bitmap_bh);
+               mark_buffer_dirty(desc_bh);
+               mark_buffer_dirty(bitmap_bh);
                nilfs_mdt_mark_dirty(inode);
 
                brelse(bitmap_bh);
index 4723f04e9b12a0e904f945997b5e1aa73cfc757b..aadbd0b5e3e8fbe516261e5ae069296b8ed940f8 100644 (file)
@@ -34,7 +34,9 @@
 
 struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
 {
-       return NILFS_I_NILFS(bmap->b_inode)->ns_dat;
+       struct the_nilfs *nilfs = bmap->b_inode->i_sb->s_fs_info;
+
+       return nilfs->ns_dat;
 }
 
 static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
index 609cd223eea85abf39232688e2ccbd9dad94ee68..a35ae35e69320f711454125f9a600586869bb980 100644 (file)
 #include "page.h"
 #include "btnode.h"
 
-void nilfs_btnode_cache_init(struct address_space *btnc,
-                            struct backing_dev_info *bdi)
-{
-       nilfs_mapping_init(btnc, bdi);
-}
-
 void nilfs_btnode_cache_clear(struct address_space *btnc)
 {
        invalidate_mapping_pages(btnc, 0, -1);
@@ -62,7 +56,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
                BUG();
        }
        memset(bh->b_data, 0, 1 << inode->i_blkbits);
-       bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+       bh->b_bdev = inode->i_sb->s_bdev;
        bh->b_blocknr = blocknr;
        set_buffer_mapped(bh);
        set_buffer_uptodate(bh);
@@ -94,10 +88,11 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
        if (pblocknr == 0) {
                pblocknr = blocknr;
                if (inode->i_ino != NILFS_DAT_INO) {
-                       struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
+                       struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 
                        /* blocknr is a virtual block number */
-                       err = nilfs_dat_translate(dat, blocknr, &pblocknr);
+                       err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
+                                                 &pblocknr);
                        if (unlikely(err)) {
                                brelse(bh);
                                goto out_locked;
@@ -120,7 +115,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                goto found;
        }
        set_buffer_mapped(bh);
-       bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+       bh->b_bdev = inode->i_sb->s_bdev;
        bh->b_blocknr = pblocknr; /* set block address for read */
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
@@ -259,7 +254,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);
-               nilfs_btnode_mark_dirty(obh);
+               mark_buffer_dirty(obh);
 
                spin_lock_irq(&btnc->tree_lock);
                radix_tree_delete(&btnc->page_tree, oldkey);
@@ -271,7 +266,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
                unlock_page(opage);
        } else {
                nilfs_copy_buffer(nbh, obh);
-               nilfs_btnode_mark_dirty(nbh);
+               mark_buffer_dirty(nbh);
 
                nbh->b_blocknr = newkey;
                ctxt->bh = nbh;
index 1b8ebd888c2844348a128a37e0781c1b82915aea..3a4dd2d8d3fc9fe47fceec0aed1a0e09837449e4 100644 (file)
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
        struct buffer_head *newbh;
 };
 
-void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                                              __u64 blocknr);
@@ -51,7 +50,4 @@ void nilfs_btnode_commit_change_key(struct address_space *,
 void nilfs_btnode_abort_change_key(struct address_space *,
                                   struct nilfs_btnode_chkey_ctxt *);
 
-#define nilfs_btnode_mark_dirty(bh)    nilfs_mark_buffer_dirty(bh)
-
-
 #endif /* _NILFS_BTNODE_H */
index d451ae0e0bf373917b88e4591b60f4ad912bc3db..7eafe468a29c71cb5338612149a435cf8b037756 100644 (file)
@@ -714,7 +714,7 @@ static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
                                nilfs_btree_get_nonroot_node(path, level),
                                path[level].bp_index, key);
                        if (!buffer_dirty(path[level].bp_bh))
-                               nilfs_btnode_mark_dirty(path[level].bp_bh);
+                               mark_buffer_dirty(path[level].bp_bh);
                } while ((path[level].bp_index == 0) &&
                         (++level < nilfs_btree_height(btree) - 1));
        }
@@ -739,7 +739,7 @@ static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
                nilfs_btree_node_insert(node, path[level].bp_index,
                                        *keyp, *ptrp, ncblk);
                if (!buffer_dirty(path[level].bp_bh))
-                       nilfs_btnode_mark_dirty(path[level].bp_bh);
+                       mark_buffer_dirty(path[level].bp_bh);
 
                if (path[level].bp_index == 0)
                        nilfs_btree_promote_key(btree, path, level + 1,
@@ -777,9 +777,9 @@ static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
        nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        nilfs_btree_promote_key(btree, path, level + 1,
                                nilfs_btree_node_get_key(node, 0));
@@ -823,9 +823,9 @@ static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
        nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        path[level + 1].bp_index++;
        nilfs_btree_promote_key(btree, path, level + 1,
@@ -870,9 +870,9 @@ static void nilfs_btree_split(struct nilfs_bmap *btree,
        nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        newkey = nilfs_btree_node_get_key(right, 0);
        newptr = path[level].bp_newreq.bpr_ptr;
@@ -919,7 +919,7 @@ static void nilfs_btree_grow(struct nilfs_bmap *btree,
        nilfs_btree_node_set_level(root, level + 1);
 
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        path[level].bp_bh = path[level].bp_sib_bh;
        path[level].bp_sib_bh = NULL;
@@ -1194,7 +1194,7 @@ static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
                nilfs_btree_node_delete(node, path[level].bp_index,
                                        keyp, ptrp, ncblk);
                if (!buffer_dirty(path[level].bp_bh))
-                       nilfs_btnode_mark_dirty(path[level].bp_bh);
+                       mark_buffer_dirty(path[level].bp_bh);
                if (path[level].bp_index == 0)
                        nilfs_btree_promote_key(btree, path, level + 1,
                                nilfs_btree_node_get_key(node, 0));
@@ -1226,9 +1226,9 @@ static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
        nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        nilfs_btree_promote_key(btree, path, level + 1,
                                nilfs_btree_node_get_key(node, 0));
@@ -1258,9 +1258,9 @@ static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
        nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        path[level + 1].bp_index++;
        nilfs_btree_promote_key(btree, path, level + 1,
@@ -1289,7 +1289,7 @@ static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
        nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        nilfs_btnode_delete(path[level].bp_bh);
        path[level].bp_bh = path[level].bp_sib_bh;
@@ -1315,7 +1315,7 @@ static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
        nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
 
        nilfs_btnode_delete(path[level].bp_sib_bh);
        path[level].bp_sib_bh = NULL;
@@ -1709,7 +1709,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
                nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
                nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
                if (!buffer_dirty(bh))
-                       nilfs_btnode_mark_dirty(bh);
+                       mark_buffer_dirty(bh);
                if (!nilfs_bmap_dirty(btree))
                        nilfs_bmap_set_dirty(btree);
 
@@ -1787,7 +1787,7 @@ static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
 {
        while ((++level < nilfs_btree_height(btree) - 1) &&
               !buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
 
        return 0;
 }
@@ -2229,7 +2229,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
        }
 
        if (!buffer_dirty(bh))
-               nilfs_btnode_mark_dirty(bh);
+               mark_buffer_dirty(bh);
        brelse(bh);
        if (!nilfs_bmap_dirty(btree))
                nilfs_bmap_set_dirty(btree);
index 5ff15a8a10242d883c78e69bb54ed9f0ac17c43d..c9b342c8b503dc4b50cd60f48b6698d3f68a3d4f 100644 (file)
@@ -216,14 +216,14 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
                if (!nilfs_cpfile_is_in_first(cpfile, cno))
                        nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
                                                                 kaddr, 1);
-               nilfs_mdt_mark_buffer_dirty(cp_bh);
+               mark_buffer_dirty(cp_bh);
 
                kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, 1);
                kunmap_atomic(kaddr, KM_USER0);
-               nilfs_mdt_mark_buffer_dirty(header_bh);
+               mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
        }
 
@@ -326,7 +326,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                }
                if (nicps > 0) {
                        tnicps += nicps;
-                       nilfs_mdt_mark_buffer_dirty(cp_bh);
+                       mark_buffer_dirty(cp_bh);
                        nilfs_mdt_mark_dirty(cpfile);
                        if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
                                count =
@@ -358,7 +358,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
-               nilfs_mdt_mark_buffer_dirty(header_bh);
+               mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
                kunmap_atomic(kaddr, KM_USER0);
        }
@@ -671,10 +671,10 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
        le64_add_cpu(&header->ch_nsnapshots, 1);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(prev_bh);
-       nilfs_mdt_mark_buffer_dirty(curr_bh);
-       nilfs_mdt_mark_buffer_dirty(cp_bh);
-       nilfs_mdt_mark_buffer_dirty(header_bh);
+       mark_buffer_dirty(prev_bh);
+       mark_buffer_dirty(curr_bh);
+       mark_buffer_dirty(cp_bh);
+       mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);
 
        brelse(prev_bh);
@@ -774,10 +774,10 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
        le64_add_cpu(&header->ch_nsnapshots, -1);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(next_bh);
-       nilfs_mdt_mark_buffer_dirty(prev_bh);
-       nilfs_mdt_mark_buffer_dirty(cp_bh);
-       nilfs_mdt_mark_buffer_dirty(header_bh);
+       mark_buffer_dirty(next_bh);
+       mark_buffer_dirty(prev_bh);
+       mark_buffer_dirty(cp_bh);
+       mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);
 
        brelse(prev_bh);
index 59e5fe742f7bd7bcc6d6aa88dbdcf595e4c64019..fcc2f869af1630a852e3663bfd937eecac86e619 100644 (file)
@@ -54,7 +54,7 @@ static int nilfs_dat_prepare_entry(struct inode *dat,
 static void nilfs_dat_commit_entry(struct inode *dat,
                                   struct nilfs_palloc_req *req)
 {
-       nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
+       mark_buffer_dirty(req->pr_entry_bh);
        nilfs_mdt_mark_dirty(dat);
        brelse(req->pr_entry_bh);
 }
@@ -361,7 +361,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
        entry->de_blocknr = cpu_to_le64(blocknr);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(entry_bh);
+       mark_buffer_dirty(entry_bh);
        nilfs_mdt_mark_dirty(dat);
 
        brelse(entry_bh);
index 397e73258631aaf546ae34a80bbd2a07b0da548b..d7eeca62febd960e9a78604021526364158d0382 100644 (file)
@@ -111,7 +111,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        nilfs_transaction_commit(inode->i_sb);
 
  mapped:
-       SetPageChecked(page);
        wait_on_page_writeback(page);
        return VM_FAULT_LOCKED;
 }
index 1c2a3e23f8b2dec6b1f895098239864d7f5e9946..08a07a218d26ef40ecc87db0560cdfedd5648e8f 100644 (file)
@@ -48,9 +48,6 @@
 #include "dat.h"
 #include "ifile.h"
 
-static const struct address_space_operations def_gcinode_aops = {
-};
-
 /*
  * nilfs_gccache_submit_read_data() - add data buffer and submit read request
  * @inode - gc inode
@@ -87,9 +84,9 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
                goto out;
 
        if (pbn == 0) {
-               struct inode *dat_inode = NILFS_I_NILFS(inode)->ns_dat;
-                                         /* use original dat, not gc dat. */
-               err = nilfs_dat_translate(dat_inode, vbn, &pbn);
+               struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+
+               err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
                if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
                        brelse(bh);
                        goto failed;
@@ -103,7 +100,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
        }
 
        if (!buffer_mapped(bh)) {
-               bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+               bh->b_bdev = inode->i_sb->s_bdev;
                set_buffer_mapped(bh);
        }
        bh->b_blocknr = pbn;
@@ -160,15 +157,11 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
        if (buffer_dirty(bh))
                return -EEXIST;
 
-       if (buffer_nilfs_node(bh)) {
-               if (nilfs_btree_broken_node_block(bh)) {
-                       clear_buffer_uptodate(bh);
-                       return -EIO;
-               }
-               nilfs_btnode_mark_dirty(bh);
-       } else {
-               nilfs_mark_buffer_dirty(bh);
+       if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
+               clear_buffer_uptodate(bh);
+               return -EIO;
        }
+       mark_buffer_dirty(bh);
        return 0;
 }
 
@@ -178,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
 
        inode->i_mode = S_IFREG;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
-       inode->i_mapping->a_ops = &def_gcinode_aops;
+       inode->i_mapping->a_ops = &empty_aops;
        inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
 
        ii->i_flags = 0;
index bfc73d3a30ed438ef4900fd580c0ad0b0d2c2861..684d76300a80f2609ac3383f91a1b7d6307f41dd 100644 (file)
@@ -80,7 +80,7 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
                return ret;
        }
        nilfs_palloc_commit_alloc_entry(ifile, &req);
-       nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh);
+       mark_buffer_dirty(req.pr_entry_bh);
        nilfs_mdt_mark_dirty(ifile);
        *out_ino = (ino_t)req.pr_entry_nr;
        *out_bh = req.pr_entry_bh;
@@ -128,7 +128,7 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
        raw_inode->i_flags = 0;
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh);
+       mark_buffer_dirty(req.pr_entry_bh);
        brelse(req.pr_entry_bh);
 
        nilfs_palloc_commit_free_entry(ifile, &req);
index c0aa27490c027f40c40a12951caf38720f0ff417..587f18432832e542571ba49ad5f3fac6e8669721 100644 (file)
@@ -74,14 +74,14 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
 {
        struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
-       struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
 
-       down_read(&NILFS_MDT(dat)->mi_sem);
+       down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
-       up_read(&NILFS_MDT(dat)->mi_sem);
+       up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
@@ -596,6 +596,16 @@ void nilfs_write_inode_common(struct inode *inode,
        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 
+       if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
+               struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+
+               /* zero-fill unused portion in the case of super root block */
+               raw_inode->i_xattr = 0;
+               raw_inode->i_pad = 0;
+               memset((void *)raw_inode + sizeof(*raw_inode), 0,
+                      nilfs->ns_inode_size - sizeof(*raw_inode));
+       }
+
        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
@@ -872,8 +882,7 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           freeing inode */
                }
-               list_del(&ii->i_dirty);
-               list_add_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
+               list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&nilfs->ns_inode_lock);
@@ -892,7 +901,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
                return err;
        }
        nilfs_update_inode(inode, ibh);
-       nilfs_mdt_mark_buffer_dirty(ibh);
+       mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
@@ -931,7 +940,7 @@ void nilfs_dirty_inode(struct inode *inode)
 int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
 {
-       struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
+       struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
index f2469ba6246bd76458da3ce2b40121dd5f5d7d2b..41d6743d303c2a7923e5d09bc1df1fcf41514c29 100644 (file)
@@ -698,6 +698,63 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
        return 0;
 }
 
+static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
+                             void __user *argp)
+{
+       __u64 newsize;
+       int ret = -EPERM;
+
+       if (!capable(CAP_SYS_ADMIN))
+               goto out;
+
+       ret = mnt_want_write(filp->f_path.mnt);
+       if (ret)
+               goto out;
+
+       ret = -EFAULT;
+       if (copy_from_user(&newsize, argp, sizeof(newsize)))
+               goto out_drop_write;
+
+       ret = nilfs_resize_fs(inode->i_sb, newsize);
+
+out_drop_write:
+       mnt_drop_write(filp->f_path.mnt);
+out:
+       return ret;
+}
+
+static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
+{
+       struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+       __u64 range[2];
+       __u64 minseg, maxseg;
+       unsigned long segbytes;
+       int ret = -EPERM;
+
+       if (!capable(CAP_SYS_ADMIN))
+               goto out;
+
+       ret = -EFAULT;
+       if (copy_from_user(range, argp, sizeof(__u64[2])))
+               goto out;
+
+       ret = -ERANGE;
+       if (range[1] > i_size_read(inode->i_sb->s_bdev->bd_inode))
+               goto out;
+
+       segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize;
+
+       minseg = range[0] + segbytes - 1;
+       do_div(minseg, segbytes);
+       maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
+       do_div(maxseg, segbytes);
+       maxseg--;
+
+       ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg);
+out:
+       return ret;
+}
+
 static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
                                unsigned int cmd, void __user *argp,
                                size_t membsz,
@@ -763,6 +820,10 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return nilfs_ioctl_clean_segments(inode, filp, cmd, argp);
        case NILFS_IOCTL_SYNC:
                return nilfs_ioctl_sync(inode, filp, cmd, argp);
+       case NILFS_IOCTL_RESIZE:
+               return nilfs_ioctl_resize(inode, filp, argp);
+       case NILFS_IOCTL_SET_ALLOC_RANGE:
+               return nilfs_ioctl_set_alloc_range(inode, argp);
        default:
                return -ENOTTY;
        }
index a649b05f7069db7b0beb31b7ebf10e5639a245bd..800e8d78a83ba0aa33560b03f19d9f90a7116393 100644 (file)
@@ -66,7 +66,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
        kunmap_atomic(kaddr, KM_USER0);
 
        set_buffer_uptodate(bh);
-       nilfs_mark_buffer_dirty(bh);
+       mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(inode);
        return 0;
 }
@@ -355,7 +355,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
        err = nilfs_mdt_read_block(inode, block, 0, &bh);
        if (unlikely(err))
                return err;
-       nilfs_mark_buffer_dirty(bh);
+       mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(inode);
        brelse(bh);
        return 0;
@@ -450,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
 
        INIT_LIST_HEAD(&shadow->frozen_buffers);
        address_space_init_once(&shadow->frozen_data);
-       nilfs_mapping_init(&shadow->frozen_data, bdi);
+       nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
        address_space_init_once(&shadow->frozen_btnodes);
-       nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
+       nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
        mi->mi_shadow = shadow;
        return 0;
 }
index ed68563ec708bd135c7abe0145a960de771424fb..ab20a4baa50fa3a41ea735fe891848f8abfb581f 100644 (file)
@@ -64,11 +64,6 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
        return inode->i_private;
 }
 
-static inline struct the_nilfs *NILFS_I_NILFS(struct inode *inode)
-{
-       return inode->i_sb->s_fs_info;
-}
-
 /* Default GFP flags using highmem */
 #define NILFS_MDT_GFP      (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
 
@@ -93,8 +88,6 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh);
 struct buffer_head *nilfs_mdt_get_frozen_buffer(struct inode *inode,
                                                struct buffer_head *bh);
 
-#define nilfs_mdt_mark_buffer_dirty(bh)        nilfs_mark_buffer_dirty(bh)
-
 static inline void nilfs_mdt_mark_dirty(struct inode *inode)
 {
        if (!test_bit(NILFS_I_DIRTY, &NILFS_I(inode)->i_state))
@@ -108,7 +101,7 @@ static inline void nilfs_mdt_clear_dirty(struct inode *inode)
 
 static inline __u64 nilfs_mdt_cno(struct inode *inode)
 {
-       return NILFS_I_NILFS(inode)->ns_cno;
+       return ((struct the_nilfs *)inode->i_sb->s_fs_info)->ns_cno;
 }
 
 #define nilfs_mdt_bgl_lock(inode, bg) \
index a8dd344303cb7e785943a6fc2a90fad93c7f06fd..a9c6a531f80c5786ce628e3cbf0bd331ba86141f 100644 (file)
@@ -80,12 +80,6 @@ static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
        return &ii->vfs_inode;
 }
 
-static inline struct inode *NILFS_AS_I(struct address_space *mapping)
-{
-       return (mapping->host) ? :
-               container_of(mapping, struct inode, i_data);
-}
-
 /*
  * Dynamic state flags of NILFS on-memory inode (i_state)
  */
@@ -298,6 +292,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
                                               int flip);
 int nilfs_commit_super(struct super_block *sb, int flag);
 int nilfs_cleanup_super(struct super_block *sb);
+int nilfs_resize_fs(struct super_block *sb, __u64 newsize);
 int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
                            struct nilfs_root **root);
 int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno);
index 1168059c7efd5ae923c1e374a00b4813b13ec7b1..65221a04c6f090b9c7e479bd32579737da712129 100644 (file)
@@ -37,8 +37,7 @@
 
 #define NILFS_BUFFER_INHERENT_BITS  \
        ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
-        (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated) | \
-        (1UL << BH_NILFS_Checked))
+        (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
 
 static struct buffer_head *
 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
@@ -59,19 +58,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
        return bh;
 }
 
-/*
- * Since the page cache of B-tree node pages or data page cache of pseudo
- * inodes does not have a valid mapping->host pointer, calling
- * mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
- * it calls __mark_inode_dirty(NULL) through __set_page_dirty().
- * To avoid this problem, the old style mark_buffer_dirty() is used instead.
- */
-void nilfs_mark_buffer_dirty(struct buffer_head *bh)
-{
-       if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-               __set_page_dirty_nobuffers(bh->b_page);
-}
-
 struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                      struct address_space *mapping,
                                      unsigned long blkoff,
@@ -183,7 +169,7 @@ int nilfs_page_buffers_clean(struct page *page)
 void nilfs_page_bug(struct page *page)
 {
        struct address_space *m;
-       unsigned long ino = 0;
+       unsigned long ino;
 
        if (unlikely(!page)) {
                printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
@@ -191,11 +177,8 @@ void nilfs_page_bug(struct page *page)
        }
 
        m = page->mapping;
-       if (m) {
-               struct inode *inode = NILFS_AS_I(m);
-               if (inode != NULL)
-                       ino = inode->i_ino;
-       }
+       ino = m ? m->host->i_ino : 0;
+
        printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
               "mapping=%p ino=%lu\n",
               page, atomic_read(&page->_count),
@@ -216,56 +199,6 @@ void nilfs_page_bug(struct page *page)
        }
 }
 
-/**
- * nilfs_alloc_private_page - allocate a private page with buffer heads
- *
- * Return Value: On success, a pointer to the allocated page is returned.
- * On error, NULL is returned.
- */
-struct page *nilfs_alloc_private_page(struct block_device *bdev, int size,
-                                     unsigned long state)
-{
-       struct buffer_head *bh, *head, *tail;
-       struct page *page;
-
-       page = alloc_page(GFP_NOFS); /* page_count of the returned page is 1 */
-       if (unlikely(!page))
-               return NULL;
-
-       lock_page(page);
-       head = alloc_page_buffers(page, size, 0);
-       if (unlikely(!head)) {
-               unlock_page(page);
-               __free_page(page);
-               return NULL;
-       }
-
-       bh = head;
-       do {
-               bh->b_state = (1UL << BH_NILFS_Allocated) | state;
-               tail = bh;
-               bh->b_bdev = bdev;
-               bh = bh->b_this_page;
-       } while (bh);
-
-       tail->b_this_page = head;
-       attach_page_buffers(page, head);
-
-       return page;
-}
-
-void nilfs_free_private_page(struct page *page)
-{
-       BUG_ON(!PageLocked(page));
-       BUG_ON(page->mapping);
-
-       if (page_has_buffers(page) && !try_to_free_buffers(page))
-               NILFS_PAGE_BUG(page, "failed to free page");
-
-       unlock_page(page);
-       __free_page(page);
-}
-
 /**
  * nilfs_copy_page -- copy the page with buffers
  * @dst: destination page
@@ -492,10 +425,10 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
        return nc;
 }
 
-void nilfs_mapping_init(struct address_space *mapping,
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
                        struct backing_dev_info *bdi)
 {
-       mapping->host = NULL;
+       mapping->host = inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->assoc_mapping = NULL;
index f06b79ad7493160877b1b82e45bc2f2acfd9b02f..fb7de71605a076aea758d674603c2e0f94a09008 100644 (file)
@@ -38,14 +38,12 @@ enum {
        BH_NILFS_Redirected,
 };
 
-BUFFER_FNS(NILFS_Allocated, nilfs_allocated)   /* nilfs private buffers */
 BUFFER_FNS(NILFS_Node, nilfs_node)             /* nilfs node buffers */
 BUFFER_FNS(NILFS_Volatile, nilfs_volatile)
 BUFFER_FNS(NILFS_Checked, nilfs_checked)       /* buffer is verified */
 BUFFER_FNS(NILFS_Redirected, nilfs_redirected) /* redirected to a copy */
 
 
-void nilfs_mark_buffer_dirty(struct buffer_head *bh);
 int __nilfs_clear_page_dirty(struct page *);
 
 struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *,
@@ -54,14 +52,11 @@ void nilfs_forget_buffer(struct buffer_head *);
 void nilfs_copy_buffer(struct buffer_head *, struct buffer_head *);
 int nilfs_page_buffers_clean(struct page *);
 void nilfs_page_bug(struct page *);
-struct page *nilfs_alloc_private_page(struct block_device *, int,
-                                     unsigned long);
-void nilfs_free_private_page(struct page *);
 
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init(struct address_space *mapping,
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
                        struct backing_dev_info *bdi);
 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
index ba4a64518f389d6dcad64830050d4fdf635746a8..a604ac0331b230cc300ddace76a4ad77efc97bea 100644 (file)
@@ -387,9 +387,9 @@ static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
 static void dispose_recovery_list(struct list_head *head)
 {
        while (!list_empty(head)) {
-               struct nilfs_recovery_block *rb
-                       = list_entry(head->next,
-                                    struct nilfs_recovery_block, list);
+               struct nilfs_recovery_block *rb;
+
+               rb = list_first_entry(head, struct nilfs_recovery_block, list);
                list_del(&rb->list);
                kfree(rb);
        }
@@ -416,9 +416,9 @@ static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
 void nilfs_dispose_segment_list(struct list_head *head)
 {
        while (!list_empty(head)) {
-               struct nilfs_segment_entry *ent
-                       = list_entry(head->next,
-                                    struct nilfs_segment_entry, list);
+               struct nilfs_segment_entry *ent;
+
+               ent = list_first_entry(head, struct nilfs_segment_entry, list);
                list_del(&ent->list);
                kfree(ent);
        }
index 2853ff20f85a2b30f8f6ac9a9c36e880bebf845c..850a7c0228fbd949be2e861faaed9cc79bbe7564 100644 (file)
@@ -239,12 +239,15 @@ nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
                                    u32 seed)
 {
        struct nilfs_super_root *raw_sr;
+       struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info;
+       unsigned srsize;
        u32 crc;
 
        raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
+       srsize = NILFS_SR_BYTES(nilfs->ns_inode_size);
        crc = crc32_le(seed,
                       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
-                      NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
+                      srsize - sizeof(raw_sr->sr_sum));
        raw_sr->sr_sum = cpu_to_le32(crc);
 }
 
@@ -254,18 +257,6 @@ static void nilfs_release_buffers(struct list_head *list)
 
        list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
                list_del_init(&bh->b_assoc_buffers);
-               if (buffer_nilfs_allocated(bh)) {
-                       struct page *clone_page = bh->b_page;
-
-                       /* remove clone page */
-                       brelse(bh);
-                       page_cache_release(clone_page); /* for each bh */
-                       if (page_count(clone_page) <= 2) {
-                               lock_page(clone_page);
-                               nilfs_free_private_page(clone_page);
-                       }
-                       continue;
-               }
                brelse(bh);
        }
 }
index afe4f2183454d49a4be19b902c200b1922ebcc4e..141646e88fb5a9e3d5103ed69d3eb4880b33963e 100644 (file)
@@ -655,13 +655,10 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                if (unlikely(page->index > last))
                        break;
 
-               if (mapping->host) {
-                       lock_page(page);
-                       if (!page_has_buffers(page))
-                               create_empty_buffers(page,
-                                                    1 << inode->i_blkbits, 0);
-                       unlock_page(page);
-               }
+               lock_page(page);
+               if (!page_has_buffers(page))
+                       create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+               unlock_page(page);
 
                bh = head = page_buffers(page);
                do {
@@ -809,7 +806,7 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
                /* The following code is duplicated with cpfile.  But, it is
                   needed to collect the checkpoint even if it was not newly
                   created */
-               nilfs_mdt_mark_buffer_dirty(bh_cp);
+               mark_buffer_dirty(bh_cp);
                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
                nilfs_cpfile_put_checkpoint(
                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
@@ -889,12 +886,14 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 {
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
-       unsigned isz = nilfs->ns_inode_size;
+       unsigned isz, srsz;
 
        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
+       isz = nilfs->ns_inode_size;
+       srsz = NILFS_SR_BYTES(isz);
 
-       raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
+       raw_sr->sr_bytes = cpu_to_le16(srsz);
        raw_sr->sr_nongc_ctime
                = cpu_to_le64(nilfs_doing_gc() ?
                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
@@ -906,6 +905,7 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
+       memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
 }
 
 static void nilfs_redirty_inodes(struct list_head *head)
@@ -954,8 +954,8 @@ static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
 
  dispose_buffers:
        while (!list_empty(listp)) {
-               bh = list_entry(listp->next, struct buffer_head,
-                               b_assoc_buffers);
+               bh = list_first_entry(listp, struct buffer_head,
+                                     b_assoc_buffers);
                list_del_init(&bh->b_assoc_buffers);
                brelse(bh);
        }
@@ -1500,10 +1500,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
                        nblocks = le32_to_cpu(finfo->fi_nblocks);
                        ndatablk = le32_to_cpu(finfo->fi_ndatablk);
 
-                       if (buffer_nilfs_node(bh))
-                               inode = NILFS_BTNC_I(bh->b_page->mapping);
-                       else
-                               inode = NILFS_AS_I(bh->b_page->mapping);
+                       inode = bh->b_page->mapping->host;
 
                        if (mode == SC_LSEG_DSYNC)
                                sc_op = &nilfs_sc_dsync_ops;
@@ -1556,83 +1553,24 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
        return 0;
 }
 
-static int
-nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
-{
-       struct page *clone_page;
-       struct buffer_head *bh, *head, *bh2;
-       void *kaddr;
-
-       bh = head = page_buffers(page);
-
-       clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
-       if (unlikely(!clone_page))
-               return -ENOMEM;
-
-       bh2 = page_buffers(clone_page);
-       kaddr = kmap_atomic(page, KM_USER0);
-       do {
-               if (list_empty(&bh->b_assoc_buffers))
-                       continue;
-               get_bh(bh2);
-               page_cache_get(clone_page); /* for each bh */
-               memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
-               bh2->b_blocknr = bh->b_blocknr;
-               list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
-               list_add_tail(&bh->b_assoc_buffers, out);
-       } while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
-       kunmap_atomic(kaddr, KM_USER0);
-
-       if (!TestSetPageWriteback(clone_page))
-               account_page_writeback(clone_page);
-       unlock_page(clone_page);
-
-       return 0;
-}
-
-static int nilfs_test_page_to_be_frozen(struct page *page)
-{
-       struct address_space *mapping = page->mapping;
-
-       if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
-               return 0;
-
-       if (page_mapped(page)) {
-               ClearPageChecked(page);
-               return 1;
-       }
-       return PageChecked(page);
-}
-
-static int nilfs_begin_page_io(struct page *page, struct list_head *out)
+static void nilfs_begin_page_io(struct page *page)
 {
        if (!page || PageWriteback(page))
                /* For split b-tree node pages, this function may be called
                   twice.  We ignore the 2nd or later calls by this check. */
-               return 0;
+               return;
 
        lock_page(page);
        clear_page_dirty_for_io(page);
        set_page_writeback(page);
        unlock_page(page);
-
-       if (nilfs_test_page_to_be_frozen(page)) {
-               int err = nilfs_copy_replace_page_buffers(page, out);
-               if (unlikely(err))
-                       return err;
-       }
-       return 0;
 }
 
-static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
-                                      struct page **failed_page)
+static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 {
        struct nilfs_segment_buffer *segbuf;
        struct page *bd_page = NULL, *fs_page = NULL;
-       struct list_head *list = &sci->sc_copied_buffers;
-       int err;
 
-       *failed_page = NULL;
        list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
                struct buffer_head *bh;
 
@@ -1662,11 +1600,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
                                break;
                        }
                        if (bh->b_page != fs_page) {
-                               err = nilfs_begin_page_io(fs_page, list);
-                               if (unlikely(err)) {
-                                       *failed_page = fs_page;
-                                       goto out;
-                               }
+                               nilfs_begin_page_io(fs_page);
                                fs_page = bh->b_page;
                        }
                }
@@ -1677,11 +1611,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
                set_page_writeback(bd_page);
                unlock_page(bd_page);
        }
-       err = nilfs_begin_page_io(fs_page, list);
-       if (unlikely(err))
-               *failed_page = fs_page;
- out:
-       return err;
+       nilfs_begin_page_io(fs_page);
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
@@ -1694,24 +1624,6 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci,
        return ret;
 }
 
-static void __nilfs_end_page_io(struct page *page, int err)
-{
-       if (!err) {
-               if (!nilfs_page_buffers_clean(page))
-                       __set_page_dirty_nobuffers(page);
-               ClearPageError(page);
-       } else {
-               __set_page_dirty_nobuffers(page);
-               SetPageError(page);
-       }
-
-       if (buffer_nilfs_allocated(page_buffers(page))) {
-               if (TestClearPageWriteback(page))
-                       dec_zone_page_state(page, NR_WRITEBACK);
-       } else
-               end_page_writeback(page);
-}
-
 static void nilfs_end_page_io(struct page *page, int err)
 {
        if (!page)
@@ -1738,40 +1650,19 @@ static void nilfs_end_page_io(struct page *page, int err)
                return;
        }
 
-       __nilfs_end_page_io(page, err);
-}
-
-static void nilfs_clear_copied_buffers(struct list_head *list, int err)
-{
-       struct buffer_head *bh, *head;
-       struct page *page;
-
-       while (!list_empty(list)) {
-               bh = list_entry(list->next, struct buffer_head,
-                               b_assoc_buffers);
-               page = bh->b_page;
-               page_cache_get(page);
-               head = bh = page_buffers(page);
-               do {
-                       if (!list_empty(&bh->b_assoc_buffers)) {
-                               list_del_init(&bh->b_assoc_buffers);
-                               if (!err) {
-                                       set_buffer_uptodate(bh);
-                                       clear_buffer_dirty(bh);
-                                       clear_buffer_delay(bh);
-                                       clear_buffer_nilfs_volatile(bh);
-                               }
-                               brelse(bh); /* for b_assoc_buffers */
-                       }
-               } while ((bh = bh->b_this_page) != head);
-
-               __nilfs_end_page_io(page, err);
-               page_cache_release(page);
+       if (!err) {
+               if (!nilfs_page_buffers_clean(page))
+                       __set_page_dirty_nobuffers(page);
+               ClearPageError(page);
+       } else {
+               __set_page_dirty_nobuffers(page);
+               SetPageError(page);
        }
+
+       end_page_writeback(page);
 }
 
-static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
-                            int err)
+static void nilfs_abort_logs(struct list_head *logs, int err)
 {
        struct nilfs_segment_buffer *segbuf;
        struct page *bd_page = NULL, *fs_page = NULL;
@@ -1801,8 +1692,6 @@ static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
                        }
                        if (bh->b_page != fs_page) {
                                nilfs_end_page_io(fs_page, err);
-                               if (fs_page && fs_page == failed_page)
-                                       return;
                                fs_page = bh->b_page;
                        }
                }
@@ -1821,12 +1710,11 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
 
        list_splice_tail_init(&sci->sc_write_logs, &logs);
        ret = nilfs_wait_on_logs(&logs);
-       nilfs_abort_logs(&logs, NULL, ret ? : err);
+       nilfs_abort_logs(&logs, ret ? : err);
 
        list_splice_tail_init(&sci->sc_segbufs, &logs);
        nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
        nilfs_free_incomplete_logs(&logs, nilfs);
-       nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
 
        if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
                ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
@@ -1920,8 +1808,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 
        nilfs_end_page_io(fs_page, 0);
 
-       nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);
-
        nilfs_drop_collected_inodes(&sci->sc_dirty_files);
 
        if (nilfs_doing_gc())
@@ -1979,7 +1865,7 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
                                              "failed to get inode block.\n");
                                return err;
                        }
-                       nilfs_mdt_mark_buffer_dirty(ibh);
+                       mark_buffer_dirty(ibh);
                        nilfs_mdt_mark_dirty(ifile);
                        spin_lock(&nilfs->ns_inode_lock);
                        if (likely(!ii->i_bh))
@@ -1991,8 +1877,7 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 
                clear_bit(NILFS_I_QUEUED, &ii->i_state);
                set_bit(NILFS_I_BUSY, &ii->i_state);
-               list_del(&ii->i_dirty);
-               list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
+               list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
        }
        spin_unlock(&nilfs->ns_inode_lock);
 
@@ -2014,8 +1899,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                clear_bit(NILFS_I_BUSY, &ii->i_state);
                brelse(ii->i_bh);
                ii->i_bh = NULL;
-               list_del(&ii->i_dirty);
-               list_add_tail(&ii->i_dirty, &ti->ti_garbage);
+               list_move_tail(&ii->i_dirty, &ti->ti_garbage);
        }
        spin_unlock(&nilfs->ns_inode_lock);
 }
@@ -2026,7 +1910,6 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 {
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
-       struct page *failed_page;
        int err;
 
        sci->sc_stage.scnt = NILFS_ST_INIT;
@@ -2081,11 +1964,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
                nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
 
                /* Write partial segments */
-               err = nilfs_segctor_prepare_write(sci, &failed_page);
-               if (err) {
-                       nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
-                       goto failed_to_write;
-               }
+               nilfs_segctor_prepare_write(sci);
 
                nilfs_add_checksums_on_logs(&sci->sc_segbufs,
                                            nilfs->ns_crc_seed);
@@ -2687,7 +2566,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
        INIT_LIST_HEAD(&sci->sc_segbufs);
        INIT_LIST_HEAD(&sci->sc_write_logs);
        INIT_LIST_HEAD(&sci->sc_gc_inodes);
-       INIT_LIST_HEAD(&sci->sc_copied_buffers);
        init_timer(&sci->sc_timer);
 
        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2741,8 +2619,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
        if (flag || !nilfs_segctor_confirm(sci))
                nilfs_segctor_write_out(sci);
 
-       WARN_ON(!list_empty(&sci->sc_copied_buffers));
-
        if (!list_empty(&sci->sc_dirty_files)) {
                nilfs_warning(sci->sc_super, __func__,
                              "dirty file(s) after the final construction\n");
index 6c02a86745fb19417d093da0af0ed205ac00a1a7..38a1d0013314395938ceb78fd1f1e906705206f1 100644 (file)
@@ -92,7 +92,6 @@ struct nilfs_segsum_pointer {
  * @sc_nblk_inc: Block count of current generation
  * @sc_dirty_files: List of files to be written
  * @sc_gc_inodes: List of GC inodes having blocks to be written
- * @sc_copied_buffers: List of copied buffers (buffer heads) to freeze data
  * @sc_freesegs: array of segment numbers to be freed
  * @sc_nfreesegs: number of segments on @sc_freesegs
  * @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -136,7 +135,6 @@ struct nilfs_sc_info {
 
        struct list_head        sc_dirty_files;
        struct list_head        sc_gc_inodes;
-       struct list_head        sc_copied_buffers;
 
        __u64                  *sc_freesegs;
        size_t                  sc_nfreesegs;
index 1d6f488ccae80706a05abddcbfb96719c2f4645b..0a0aba617d8abf21cd4f25255caaf8d66383bc73 100644 (file)
@@ -33,7 +33,9 @@
 
 struct nilfs_sufile_info {
        struct nilfs_mdt_info mi;
-       unsigned long ncleansegs;
+       unsigned long ncleansegs;/* number of clean segments */
+       __u64 allocmin;         /* lower limit of allocatable segment range */
+       __u64 allocmax;         /* upper limit of allocatable segment range */
 };
 
 static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
@@ -96,6 +98,13 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                   create, NULL, bhp);
 }
 
+static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
+                                                  __u64 segnum)
+{
+       return nilfs_mdt_delete_block(sufile,
+                                     nilfs_sufile_get_blkoff(sufile, segnum));
+}
+
 static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
 {
@@ -108,7 +117,7 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(header_bh);
+       mark_buffer_dirty(header_bh);
 }
 
 /**
@@ -247,6 +256,35 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
        return ret;
 }
 
+/**
+ * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
+ * @sufile: inode of segment usage file
+ * @start: minimum segment number of allocatable region (inclusive)
+ * @end: maximum segment number of allocatable region (inclusive)
+ *
+ * Return Value: On success, 0 is returned.  On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-ERANGE - invalid segment region
+ */
+int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
+{
+       struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+       __u64 nsegs;
+       int ret = -ERANGE;
+
+       down_write(&NILFS_MDT(sufile)->mi_sem);
+       nsegs = nilfs_sufile_get_nsegments(sufile);
+
+       if (start <= end && end < nsegs) {
+               sui->allocmin = start;
+               sui->allocmax = end;
+               ret = 0;
+       }
+       up_write(&NILFS_MDT(sufile)->mi_sem);
+       return ret;
+}
+
 /**
  * nilfs_sufile_alloc - allocate a segment
  * @sufile: inode of segment usage file
@@ -269,11 +307,12 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
+       struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
-       unsigned long nsegments, ncleansegs, nsus;
-       int ret, i, j;
+       unsigned long nsegments, ncleansegs, nsus, cnt;
+       int ret, j;
 
        down_write(&NILFS_MDT(sufile)->mi_sem);
 
@@ -287,13 +326,31 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
        kunmap_atomic(kaddr, KM_USER0);
 
        nsegments = nilfs_sufile_get_nsegments(sufile);
+       maxsegnum = sui->allocmax;
        segnum = last_alloc + 1;
-       maxsegnum = nsegments - 1;
-       for (i = 0; i < nsegments; i += nsus) {
-               if (segnum >= nsegments) {
-                       /* wrap around */
-                       segnum = 0;
-                       maxsegnum = last_alloc;
+       if (segnum < sui->allocmin || segnum > sui->allocmax)
+               segnum = sui->allocmin;
+
+       for (cnt = 0; cnt < nsegments; cnt += nsus) {
+               if (segnum > maxsegnum) {
+                       if (cnt < sui->allocmax - sui->allocmin + 1) {
+                               /*
+                                * wrap around in the limited region.
+                                * if allocation started from
+                                * sui->allocmin, this never happens.
+                                */
+                               segnum = sui->allocmin;
+                               maxsegnum = last_alloc;
+                       } else if (segnum > sui->allocmin &&
+                                  sui->allocmax + 1 < nsegments) {
+                               segnum = sui->allocmax + 1;
+                               maxsegnum = nsegments - 1;
+                       } else if (sui->allocmin > 0)  {
+                               segnum = 0;
+                               maxsegnum = sui->allocmin - 1;
+                       } else {
+                               break; /* never happens */
+                       }
                }
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
@@ -319,9 +376,9 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr, KM_USER0);
 
-                       NILFS_SUI(sufile)->ncleansegs--;
-                       nilfs_mdt_mark_buffer_dirty(header_bh);
-                       nilfs_mdt_mark_buffer_dirty(su_bh);
+                       sui->ncleansegs--;
+                       mark_buffer_dirty(header_bh);
+                       mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;
@@ -364,7 +421,7 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;
 
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
 }
 
@@ -395,7 +452,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;
 
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
 }
 
@@ -421,7 +478,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr, KM_USER0);
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
 
        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        NILFS_SUI(sufile)->ncleansegs++;
@@ -441,7 +498,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (!ret) {
-               nilfs_mdt_mark_buffer_dirty(bh);
+               mark_buffer_dirty(bh);
                nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
        }
@@ -476,7 +533,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
        su->su_nblocks = cpu_to_le32(nblocks);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(bh);
+       mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(sufile);
        brelse(bh);
 
@@ -505,7 +562,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 {
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
-       struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
+       struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        int ret;
 
@@ -555,10 +612,182 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                nilfs_sufile_mod_counter(header_bh, -1, 0);
                NILFS_SUI(sufile)->ncleansegs--;
        }
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
 }
 
+/**
+  * nilfs_sufile_truncate_range - truncate range of segment array
+  * @sufile: inode of segment usage file
+  * @start: start segment number (inclusive)
+  * @end: end segment number (inclusive)
+  *
+  * Return Value: On success, 0 is returned.  On error, one of the
+  * following negative error codes is returned.
+  *
+  * %-EIO - I/O error.
+  *
+  * %-ENOMEM - Insufficient amount of memory available.
+  *
+  * %-EINVAL - Invalid number of segments specified
+  *
+  * %-EBUSY - Dirty or active segments are present in the range
+  */
+static int nilfs_sufile_truncate_range(struct inode *sufile,
+                                      __u64 start, __u64 end)
+{
+       struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+       struct buffer_head *header_bh;
+       struct buffer_head *su_bh;
+       struct nilfs_segment_usage *su, *su2;
+       size_t susz = NILFS_MDT(sufile)->mi_entry_size;
+       unsigned long segusages_per_block;
+       unsigned long nsegs, ncleaned;
+       __u64 segnum;
+       void *kaddr;
+       ssize_t n, nc;
+       int ret;
+       int j;
+
+       nsegs = nilfs_sufile_get_nsegments(sufile);
+
+       ret = -EINVAL;
+       if (start > end || start >= nsegs)
+               goto out;
+
+       ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+       if (ret < 0)
+               goto out;
+
+       segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
+       ncleaned = 0;
+
+       for (segnum = start; segnum <= end; segnum += n) {
+               n = min_t(unsigned long,
+                         segusages_per_block -
+                                 nilfs_sufile_get_offset(sufile, segnum),
+                         end - segnum + 1);
+               ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
+                                                          &su_bh);
+               if (ret < 0) {
+                       if (ret != -ENOENT)
+                               goto out_header;
+                       /* hole */
+                       continue;
+               }
+               kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+               su = nilfs_sufile_block_get_segment_usage(
+                       sufile, segnum, su_bh, kaddr);
+               su2 = su;
+               for (j = 0; j < n; j++, su = (void *)su + susz) {
+                       if ((le32_to_cpu(su->su_flags) &
+                            ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
+                           nilfs_segment_is_active(nilfs, segnum + j)) {
+                               ret = -EBUSY;
+                               kunmap_atomic(kaddr, KM_USER0);
+                               brelse(su_bh);
+                               goto out_header;
+                       }
+               }
+               nc = 0;
+               for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
+                       if (nilfs_segment_usage_error(su)) {
+                               nilfs_segment_usage_set_clean(su);
+                               nc++;
+                       }
+               }
+               kunmap_atomic(kaddr, KM_USER0);
+               if (nc > 0) {
+                       mark_buffer_dirty(su_bh);
+                       ncleaned += nc;
+               }
+               brelse(su_bh);
+
+               if (n == segusages_per_block) {
+                       /* make hole */
+                       nilfs_sufile_delete_segment_usage_block(sufile, segnum);
+               }
+       }
+       ret = 0;
+
+out_header:
+       if (ncleaned > 0) {
+               NILFS_SUI(sufile)->ncleansegs += ncleaned;
+               nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
+               nilfs_mdt_mark_dirty(sufile);
+       }
+       brelse(header_bh);
+out:
+       return ret;
+}
+
+/**
+ * nilfs_sufile_resize - resize segment array
+ * @sufile: inode of segment usage file
+ * @newnsegs: new number of segments
+ *
+ * Return Value: On success, 0 is returned.  On error, one of the
+ * following negative error codes is returned.
+ *
+ * %-EIO - I/O error.
+ *
+ * %-ENOMEM - Insufficient amount of memory available.
+ *
+ * %-ENOSPC - Enough free space is not left for shrinking
+ *
+ * %-EBUSY - Dirty or active segments exist in the region to be truncated
+ */
+int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
+{
+       struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+       struct buffer_head *header_bh;
+       struct nilfs_sufile_header *header;
+       struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+       void *kaddr;
+       unsigned long nsegs, nrsvsegs;
+       int ret = 0;
+
+       down_write(&NILFS_MDT(sufile)->mi_sem);
+
+       nsegs = nilfs_sufile_get_nsegments(sufile);
+       if (nsegs == newnsegs)
+               goto out;
+
+       ret = -ENOSPC;
+       nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
+       if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
+               goto out;
+
+       ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+       if (ret < 0)
+               goto out;
+
+       if (newnsegs > nsegs) {
+               sui->ncleansegs += newnsegs - nsegs;
+       } else /* newnsegs < nsegs */ {
+               ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
+               if (ret < 0)
+                       goto out_header;
+
+               sui->ncleansegs -= nsegs - newnsegs;
+       }
+
+       kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+       header = kaddr + bh_offset(header_bh);
+       header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
+       kunmap_atomic(kaddr, KM_USER0);
+
+       mark_buffer_dirty(header_bh);
+       nilfs_mdt_mark_dirty(sufile);
+       nilfs_set_nsegments(nilfs, newnsegs);
+
+out_header:
+       brelse(header_bh);
+out:
+       up_write(&NILFS_MDT(sufile)->mi_sem);
+       return ret;
+}
+
 /**
  * nilfs_sufile_get_suinfo -
  * @sufile: inode of segment usage file
@@ -583,7 +812,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
        struct nilfs_segment_usage *su;
        struct nilfs_suinfo *si = buf;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
-       struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
+       struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
@@ -679,6 +908,9 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
        kunmap_atomic(kaddr, KM_USER0);
        brelse(header_bh);
 
+       sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
+       sui->allocmin = 0;
+
        unlock_new_inode(sufile);
  out:
        *inodep = sufile;
index a943fbacb45b8021f1a11e12657662d92dca613c..e84bc5b51fc11df1090084ba0d0eb25c342fb2ad 100644 (file)
 
 static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
 {
-       return NILFS_I_NILFS(sufile)->ns_nsegments;
+       return ((struct the_nilfs *)sufile->i_sb->s_fs_info)->ns_nsegments;
 }
 
 unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
 
+int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end);
 int nilfs_sufile_alloc(struct inode *, __u64 *);
 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
@@ -61,6 +62,7 @@ void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
 void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
                               struct buffer_head *);
 
+int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs);
 int nilfs_sufile_read(struct super_block *sb, size_t susize,
                      struct nilfs_inode *raw_inode, struct inode **inodep);
 
index 062cca065195ea7f430f45cf9259896be44be788..8351c44a7320f06902bf03dcc114ade367c22fbe 100644 (file)
@@ -56,6 +56,7 @@
 #include "btnode.h"
 #include "page.h"
 #include "cpfile.h"
+#include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */
 #include "ifile.h"
 #include "dat.h"
 #include "segment.h"
@@ -165,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
        ii->i_state = 0;
        ii->i_cno = 0;
        ii->vfs_inode.i_version = 1;
-       nilfs_btnode_cache_init(&ii->i_btnode_cache, sb->s_bdi);
+       nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi);
        return &ii->vfs_inode;
 }
 
@@ -347,6 +348,134 @@ int nilfs_cleanup_super(struct super_block *sb)
        return ret;
 }
 
+/**
+ * nilfs_move_2nd_super - relocate secondary super block
+ * @sb: super block instance
+ * @sb2off: new offset of the secondary super block (in bytes)
+ */
+static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
+{
+       struct the_nilfs *nilfs = sb->s_fs_info;
+       struct buffer_head *nsbh;
+       struct nilfs_super_block *nsbp;
+       sector_t blocknr, newblocknr;
+       unsigned long offset;
+       int sb2i = -1;  /* array index of the secondary superblock */
+       int ret = 0;
+
+       /* nilfs->ns_sem must be locked by the caller. */
+       if (nilfs->ns_sbh[1] &&
+           nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) {
+               sb2i = 1;
+               blocknr = nilfs->ns_sbh[1]->b_blocknr;
+       } else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) {
+               sb2i = 0;
+               blocknr = nilfs->ns_sbh[0]->b_blocknr;
+       }
+       if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off)
+               goto out;  /* super block location is unchanged */
+
+       /* Get new super block buffer */
+       newblocknr = sb2off >> nilfs->ns_blocksize_bits;
+       offset = sb2off & (nilfs->ns_blocksize - 1);
+       nsbh = sb_getblk(sb, newblocknr);
+       if (!nsbh) {
+               printk(KERN_WARNING
+                      "NILFS warning: unable to move secondary superblock "
+                      "to block %llu\n", (unsigned long long)newblocknr);
+               ret = -EIO;
+               goto out;
+       }
+       nsbp = (void *)nsbh->b_data + offset;
+       memset(nsbp, 0, nilfs->ns_blocksize);
+
+       if (sb2i >= 0) {
+               memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
+               brelse(nilfs->ns_sbh[sb2i]);
+               nilfs->ns_sbh[sb2i] = nsbh;
+               nilfs->ns_sbp[sb2i] = nsbp;
+       } else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) {
+               /* secondary super block will be restored to index 1 */
+               nilfs->ns_sbh[1] = nsbh;
+               nilfs->ns_sbp[1] = nsbp;
+       } else {
+               brelse(nsbh);
+       }
+out:
+       return ret;
+}
+
+/**
+ * nilfs_resize_fs - resize the filesystem
+ * @sb: super block instance
+ * @newsize: new size of the filesystem (in bytes)
+ */
+int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
+{
+       struct the_nilfs *nilfs = sb->s_fs_info;
+       struct nilfs_super_block **sbp;
+       __u64 devsize, newnsegs;
+       loff_t sb2off;
+       int ret;
+
+       ret = -ERANGE;
+       devsize = i_size_read(sb->s_bdev->bd_inode);
+       if (newsize > devsize)
+               goto out;
+
+       /*
+        * Write lock is required to protect some functions depending
+        * on the number of segments, the number of reserved segments,
+        * and so forth.
+        */
+       down_write(&nilfs->ns_segctor_sem);
+
+       sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
+       newnsegs = sb2off >> nilfs->ns_blocksize_bits;
+       do_div(newnsegs, nilfs->ns_blocks_per_segment);
+
+       ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
+       up_write(&nilfs->ns_segctor_sem);
+       if (ret < 0)
+               goto out;
+
+       ret = nilfs_construct_segment(sb);
+       if (ret < 0)
+               goto out;
+
+       down_write(&nilfs->ns_sem);
+       nilfs_move_2nd_super(sb, sb2off);
+       ret = -EIO;
+       sbp = nilfs_prepare_super(sb, 0);
+       if (likely(sbp)) {
+               nilfs_set_log_cursor(sbp[0], nilfs);
+               /*
+                * Drop NILFS_RESIZE_FS flag for compatibility with
+                * mount-time resize which may be implemented in a
+                * future release.
+                */
+               sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
+                                             ~NILFS_RESIZE_FS);
+               sbp[0]->s_dev_size = cpu_to_le64(newsize);
+               sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
+               if (sbp[1])
+                       memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
+               ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
+       }
+       up_write(&nilfs->ns_sem);
+
+       /*
+        * Reset the range of allocatable segments last.  This order
+        * is important in the case of expansion because the secondary
+        * superblock must be protected from log write until migration
+        * completes.
+        */
+       if (!ret)
+               nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1);
+out:
+       return ret;
+}
+
 static void nilfs_put_super(struct super_block *sb)
 {
        struct the_nilfs *nilfs = sb->s_fs_info;
index d2acd1a651f31caf260d964c719fed086b43cb85..d32714094375b72695bcacdfcf7cdd866be2ff05 100644 (file)
@@ -363,6 +363,24 @@ static unsigned long long nilfs_max_size(unsigned int blkbits)
        return res;
 }
 
+/**
+ * nilfs_nrsvsegs - calculate the number of reserved segments
+ * @nilfs: nilfs object
+ * @nsegs: total number of segments
+ */
+unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
+{
+       return max_t(unsigned long, NILFS_MIN_NRSVSEGS,
+                    DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage,
+                                 100));
+}
+
+void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
+{
+       nilfs->ns_nsegments = nsegs;
+       nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs);
+}
+
 static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
                                   struct nilfs_super_block *sbp)
 {
@@ -389,13 +407,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
        }
 
        nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
-       nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments);
        nilfs->ns_r_segments_percentage =
                le32_to_cpu(sbp->s_r_segments_percentage);
-       nilfs->ns_nrsvsegs =
-               max_t(unsigned long, NILFS_MIN_NRSVSEGS,
-                     DIV_ROUND_UP(nilfs->ns_nsegments *
-                                  nilfs->ns_r_segments_percentage, 100));
+       nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
        nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
        return 0;
 }
index f4968145c2a3ee22bdbff5fb7d4e2dd40f6fd392..9992b11312ff5918b9509eabfdb0cab1ecbecd94 100644 (file)
@@ -268,6 +268,8 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev);
 void destroy_nilfs(struct the_nilfs *nilfs);
 int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
 int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
+unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs);
+void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs);
 int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
 int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
 struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno);
index c64f3680d4f154b2bc2959380e0b04ecd5477d52..5e6f42789afec7b20d6aeec2fc9e0d3f61a43caa 100644 (file)
@@ -125,7 +125,6 @@ struct fw_card {
        struct delayed_work bm_work; /* bus manager job */
        int bm_retries;
        int bm_generation;
-       __be32 bm_transaction_data[2];
        int bm_node_id;
        bool bm_abdicate;
 
@@ -441,12 +440,15 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload);
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
 int fw_iso_context_start(struct fw_iso_context *ctx,
                         int cycle, int sync, int tags);
 int fw_iso_context_stop(struct fw_iso_context *ctx);
 void fw_iso_context_destroy(struct fw_iso_context *ctx);
 void fw_iso_resource_manage(struct fw_card *card, int generation,
                            u64 channels_mask, int *channel, int *bandwidth,
-                           bool allocate, __be32 buffer[2]);
+                           bool allocate);
+
+extern struct workqueue_struct *fw_workqueue;
 
 #endif /* _LINUX_FIREWIRE_H */
index 072fe8c93e6f08e03b6593438881665ca6f841f0..42557851b12e03ea510223f192dcc273dffa8f2c 100644 (file)
 #include <linux/pci.h>
 #include <linux/completion.h>
 #include <linux/pm.h>
+#include <linux/mutex.h>
 #ifdef CONFIG_BLK_DEV_IDEACPI
 #include <acpi/acpi.h>
 #endif
 #include <asm/byteorder.h>
 #include <asm/system.h>
 #include <asm/io.h>
-#include <asm/mutex.h>
 
 /* for request_sense */
 #include <linux/cdrom.h>
index 8768c469e93e611f1636748cf5cabf8250896ec7..7454ad7451b49f1d9423254f8e877dda51ea4f4c 100644 (file)
@@ -107,7 +107,7 @@ struct nilfs_super_root {
 #define NILFS_SR_DAT_OFFSET(inode_size)     NILFS_SR_MDT_OFFSET(inode_size, 0)
 #define NILFS_SR_CPFILE_OFFSET(inode_size)  NILFS_SR_MDT_OFFSET(inode_size, 1)
 #define NILFS_SR_SUFILE_OFFSET(inode_size)  NILFS_SR_MDT_OFFSET(inode_size, 2)
-#define NILFS_SR_BYTES                  (sizeof(struct nilfs_super_root))
+#define NILFS_SR_BYTES(inode_size)         NILFS_SR_MDT_OFFSET(inode_size, 3)
 
 /*
  * Maximal mount counts
@@ -845,5 +845,7 @@ struct nilfs_bdesc {
        _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
 #define NILFS_IOCTL_RESIZE  \
        _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
+#define NILFS_IOCTL_SET_ALLOC_RANGE  \
+       _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2])
 
 #endif /* _LINUX_NILFS_FS_H */
index 79aafbbf430add6d0474d4ce0ddbce69fd927c1d..16c9c091555dfe6f19036eafc7aef59ac5614bb0 100644 (file)
@@ -1782,7 +1782,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 
 #define skb_queue_walk(queue, skb) \
                for (skb = (queue)->next;                                       \
-                    prefetch(skb->next), (skb != (struct sk_buff *)(queue));   \
+                    skb != (struct sk_buff *)(queue);                          \
                     skb = skb->next)
 
 #define skb_queue_walk_safe(queue, skb, tmp)                                   \
@@ -1791,7 +1791,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
                     skb = tmp, tmp = skb->next)
 
 #define skb_queue_walk_from(queue, skb)                                                \
-               for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
+               for (; skb != (struct sk_buff *)(queue);                        \
                     skb = skb->next)
 
 #define skb_queue_walk_from_safe(queue, skb, tmp)                              \
@@ -1801,7 +1801,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 
 #define skb_queue_reverse_walk(queue, skb) \
                for (skb = (queue)->prev;                                       \
-                    prefetch(skb->prev), (skb != (struct sk_buff *)(queue));   \
+                    skb != (struct sk_buff *)(queue);                          \
                     skb = skb->prev)
 
 #define skb_queue_reverse_walk_safe(queue, skb, tmp)                           \
index 8c7189c3f6ed9b822eece55d1943e6691851307e..e6d6a66a8f715a48d741a2331576fef7ea614ec8 100644 (file)
@@ -538,7 +538,7 @@ struct ieee80211_tx_info {
 };
 
 /**
- * ieee80211_sched_scan_ies - scheduled scan IEs
+ * struct ieee80211_sched_scan_ies - scheduled scan IEs
  *
  * This structure is used to pass the appropriate IEs to be used in scheduled
  * scans for all bands.  It contains both the IEs passed from the userspace
@@ -2278,6 +2278,7 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
 
 /**
  * ieee80211_sta_set_tim - set the TIM bit for a sleeping station
+ * @sta: &struct ieee80211_sta pointer for the sleeping station
  *
  * If a driver buffers frames for a powersave station instead of passing
  * them back to mac80211 for retransmission, the station needs to be told
index 4986ecc49e6566c4a7ac4900de1b540a3a0c1685..c8b172efaa65bf9264ecae76422159214f943b95 100644 (file)
@@ -827,11 +827,6 @@ config SCHED_AUTOGROUP
          desktop applications.  Task group autogeneration is currently based
          upon task session.
 
-config SCHED_TTWU_QUEUE
-       bool
-       depends on !SPARC32
-       default y
-
 config MM_OWNER
        bool
 
@@ -908,7 +903,6 @@ endif
 
 config CC_OPTIMIZE_FOR_SIZE
        bool "Optimize for size"
-       default y
        help
          Enabling this option will pass "-Os" instead of "-O2" to gcc
          resulting in a smaller kernel.
index c62acf45d3b9c0c4954090454de59bd08e2eb180..0516af4150855b5d8e775c980a10ef7e0fe9d6fd 100644 (file)
@@ -2564,7 +2564,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_TTWU_QUEUE)
+#if defined(CONFIG_SMP)
        if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
                ttwu_queue_remote(p, cpu);
                return;
index c779ce96e5b577e757fd5f1e69d417359ff30a40..58c25ea5a5c19ef58ec204d79c209744bfc6a59a 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/protocol.h>
index 1c1c093cf279caf9786456e0967cc7547b053276..2b9644e19de0dc8f1d225b19e834403613d40e42 100644 (file)
@@ -96,12 +96,12 @@ static inline struct netlbl_af4list *__af4list_valid_rcu(struct list_head *s,
 
 #define netlbl_af4list_foreach(iter, head)                             \
        for (iter = __af4list_valid((head)->next, head);                \
-            prefetch(iter->list.next), &iter->list != (head);          \
+            &iter->list != (head);                                     \
             iter = __af4list_valid(iter->list.next, head))
 
 #define netlbl_af4list_foreach_rcu(iter, head)                         \
        for (iter = __af4list_valid_rcu((head)->next, head);            \
-            prefetch(iter->list.next), &iter->list != (head);          \
+            &iter->list != (head);                                     \
             iter = __af4list_valid_rcu(iter->list.next, head))
 
 #define netlbl_af4list_foreach_safe(iter, tmp, head)                   \
@@ -163,12 +163,12 @@ static inline struct netlbl_af6list *__af6list_valid_rcu(struct list_head *s,
 
 #define netlbl_af6list_foreach(iter, head)                             \
        for (iter = __af6list_valid((head)->next, head);                \
-            prefetch(iter->list.next), &iter->list != (head);          \
+            &iter->list != (head);                                     \
             iter = __af6list_valid(iter->list.next, head))
 
 #define netlbl_af6list_foreach_rcu(iter, head)                         \
        for (iter = __af6list_valid_rcu((head)->next, head);            \
-            prefetch(iter->list.next), &iter->list != (head);          \
+            &iter->list != (head);                                     \
             iter = __af6list_valid_rcu(iter->list.next, head))
 
 #define netlbl_af6list_foreach_safe(iter, tmp, head)                   \
index b18140ff2b93c48eb2a0dea6074e866a8340b56a..87657dd7714ccccb59f4922f12a880680b7838c8 100644 (file)
@@ -396,6 +396,7 @@ static void out_packet_callback(struct fw_iso_context *context, u32 cycle,
 
        for (i = 0; i < packets; ++i)
                queue_out_packet(s, ++cycle);
+       fw_iso_context_queue_flush(s->context);
 }
 
 static int queue_initial_skip_packets(struct amdtp_out_stream *s)
index 4a37f3a6fab904ec5f8048ec97863a59cc8aeca2..14cacbc655dd790261db8962090c075d7a993a70 100644 (file)
@@ -49,10 +49,9 @@ static int pcr_modify(struct cmp_connection *c,
                      enum bus_reset_handling bus_reset_handling)
 {
        struct fw_device *device = fw_parent_device(c->resources.unit);
-       __be32 *buffer = c->resources.buffer;
        int generation = c->resources.generation;
        int rcode, errors = 0;
-       __be32 old_arg;
+       __be32 old_arg, buffer[2];
        int err;
 
        buffer[0] = c->last_pcr_value;
index 775dbd5f34452b76e53086d2f8b6225020cb6ce4..bb9c0c1fb529828f1ccbe6ff7ee86fa8849c4a45 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/jiffies.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include "iso-resources.h"
 
  */
 int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
 {
-       r->buffer = kmalloc(2 * 4, GFP_KERNEL);
-       if (!r->buffer)
-               return -ENOMEM;
-
        r->channels_mask = ~0uLL;
        r->unit = fw_unit_get(unit);
        mutex_init(&r->mutex);
@@ -44,7 +39,6 @@ int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
 void fw_iso_resources_destroy(struct fw_iso_resources *r)
 {
        WARN_ON(r->allocated);
-       kfree(r->buffer);
        mutex_destroy(&r->mutex);
        fw_unit_put(r->unit);
 }
@@ -131,7 +125,7 @@ retry_after_bus_reset:
 
        bandwidth = r->bandwidth + r->bandwidth_overhead;
        fw_iso_resource_manage(card, r->generation, r->channels_mask,
-                              &channel, &bandwidth, true, r->buffer);
+                              &channel, &bandwidth, true);
        if (channel == -EAGAIN) {
                mutex_unlock(&r->mutex);
                goto retry_after_bus_reset;
@@ -184,7 +178,7 @@ int fw_iso_resources_update(struct fw_iso_resources *r)
        bandwidth = r->bandwidth + r->bandwidth_overhead;
 
        fw_iso_resource_manage(card, r->generation, 1uLL << r->channel,
-                              &channel, &bandwidth, true, r->buffer);
+                              &channel, &bandwidth, true);
        /*
         * When another bus reset happens, pretend that the allocation
         * succeeded; we will try again for the new generation later.
@@ -220,7 +214,7 @@ void fw_iso_resources_free(struct fw_iso_resources *r)
        if (r->allocated) {
                bandwidth = r->bandwidth + r->bandwidth_overhead;
                fw_iso_resource_manage(card, r->generation, 1uLL << r->channel,
-                                      &channel, &bandwidth, false, r->buffer);
+                                      &channel, &bandwidth, false);
                if (channel < 0)
                        dev_err(&r->unit->device,
                                "isochronous resource deallocation failed\n");
index 3f0730e4d84181771067199708f497146762d244..5a9af7c61657e5429f35a0dd6c844d3af6d069c6 100644 (file)
@@ -24,7 +24,6 @@ struct fw_iso_resources {
        unsigned int bandwidth_overhead;
        int generation; /* in which allocation is valid */
        bool allocated;
-       __be32 *buffer;
 };
 
 int fw_iso_resources_init(struct fw_iso_resources *r,
index 8ce792ea08e9f782d3e661dbbb99798839a08ed4..1fd29b2daa9204fff6e345c8871799dcb0e04e5e 100755 (executable)
@@ -36,6 +36,7 @@ $default{"REBOOT_ON_SUCCESS"} = 1;
 $default{"POWEROFF_ON_SUCCESS"}        = 0;
 $default{"BUILD_OPTIONS"}      = "";
 $default{"BISECT_SLEEP_TIME"}  = 60;   # sleep time between bisects
+$default{"PATCHCHECK_SLEEP_TIME"} = 60; # sleep time between patch checks
 $default{"CLEAR_LOG"}          = 0;
 $default{"BISECT_MANUAL"}      = 0;
 $default{"BISECT_SKIP"}                = 1;
@@ -96,6 +97,7 @@ my $monitor_pid;
 my $monitor_cnt = 0;
 my $sleep_time;
 my $bisect_sleep_time;
+my $patchcheck_sleep_time;
 my $store_failures;
 my $timeout;
 my $booted_timeout;
@@ -112,6 +114,7 @@ my $successes = 0;
 
 my %entered_configs;
 my %config_help;
+my %variable;
 
 $config_help{"MACHINE"} = << "EOF"
  The machine hostname that you will test.
@@ -260,6 +263,39 @@ sub get_ktest_configs {
     }
 }
 
+sub process_variables {
+    my ($value) = @_;
+    my $retval = "";
+
+    # We want to check for '\', and it is just easier
+    # to check the previous character of '$' and not need
+    # to worry if '$' is the first character. By adding
+    # a space to $value, we can just check [^\\]\$ and
+    # it will still work.
+    $value = " $value";
+
+    while ($value =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) {
+       my $begin = $1;
+       my $var = $2;
+       my $end = $3;
+       # append beginning of value to retval
+       $retval = "$retval$begin";
+       if (defined($variable{$var})) {
+           $retval = "$retval$variable{$var}";
+       } else {
+           # put back the original piece.
+           $retval = "$retval\$\{$var\}";
+       }
+       $value = $end;
+    }
+    $retval = "$retval$value";
+
+    # remove the space added in the beginning
+    $retval =~ s/ //;
+
+    return "$retval"
+}
+
 sub set_value {
     my ($lvalue, $rvalue) = @_;
 
@@ -269,10 +305,22 @@ sub set_value {
     if ($rvalue =~ /^\s*$/) {
        delete $opt{$lvalue};
     } else {
+       $rvalue = process_variables($rvalue);
        $opt{$lvalue} = $rvalue;
     }
 }
 
+sub set_variable {
+    my ($lvalue, $rvalue) = @_;
+
+    if ($rvalue =~ /^\s*$/) {
+       delete $variable{$lvalue};
+    } else {
+       $rvalue = process_variables($rvalue);
+       $variable{$lvalue} = $rvalue;
+    }
+}
+
 sub read_config {
     my ($config) = @_;
 
@@ -385,6 +433,22 @@ sub read_config {
                    $repeats{$val} = $repeat;
                }
            }
+       } elsif (/^\s*([A-Z_\[\]\d]+)\s*:=\s*(.*?)\s*$/) {
+           next if ($skip);
+
+           my $lvalue = $1;
+           my $rvalue = $2;
+
+           # process config variables.
+           # Config variables are only active while reading the
+           # config and can be defined anywhere. They also ignore
+           # TEST_START and DEFAULTS, but are skipped if they are in
+           # one of these sections that have SKIP defined.
+           # The same variable can be
+           # defined multiple times and the new one simply overrides
+           # the previous one.
+           set_variable($lvalue, $rvalue);
+
        } else {
            die "$name: $.: Garbage found in config\n$_";
        }
@@ -838,6 +902,7 @@ sub monitor {
 
        if ($stop_test_after > 0 && !$booted && !$bug) {
            if (time - $monitor_start > $stop_test_after) {
+               doprint "STOP_TEST_AFTER ($stop_test_after seconds) timed out\n";
                $done = 1;
            }
        }
@@ -907,7 +972,7 @@ sub install {
     return if (!defined($post_install));
 
     my $cp_post_install = $post_install;
-    $cp_post_install = s/\$KERNEL_VERSION/$version/g;
+    $cp_post_install =~ s/\$KERNEL_VERSION/$version/g;
     run_command "$cp_post_install" or
        dodie "Failed to run post install";
 }
@@ -1247,14 +1312,14 @@ sub run_bisect_test {
 
     if ($failed) {
        $result = 0;
-
-       # reboot the box to a good kernel
-       if ($type ne "build") {
-           bisect_reboot;
-       }
     } else {
        $result = 1;
     }
+
+    # reboot the box to a kernel we can ssh to
+    if ($type ne "build") {
+       bisect_reboot;
+    }
     $in_bisect = 0;
 
     return $result;
@@ -1763,6 +1828,14 @@ sub config_bisect {
     success $i;
 }
 
+sub patchcheck_reboot {
+    doprint "Reboot and sleep $patchcheck_sleep_time seconds\n";
+    reboot;
+    start_monitor;
+    wait_for_monitor $patchcheck_sleep_time;
+    end_monitor;
+}
+
 sub patchcheck {
     my ($i) = @_;
 
@@ -1854,6 +1927,8 @@ sub patchcheck {
        end_monitor;
        return 0 if ($failed);
 
+       patchcheck_reboot;
+
     }
     $in_patchcheck = 0;
     success $i;
@@ -1944,7 +2019,7 @@ for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
     }
 }
 
-sub set_test_option {
+sub __set_test_option {
     my ($name, $i) = @_;
 
     my $option = "$name\[$i\]";
@@ -1970,6 +2045,72 @@ sub set_test_option {
     return undef;
 }
 
+sub eval_option {
+    my ($option, $i) = @_;
+
+    # Add space to evaluate the character before $
+    $option = " $option";
+    my $retval = "";
+
+    while ($option =~ /(.*?[^\\])\$\{(.*?)\}(.*)/) {
+       my $start = $1;
+       my $var = $2;
+       my $end = $3;
+
+       # Append beginning of line
+       $retval = "$retval$start";
+
+       # If the iteration option OPT[$i] exists, then use that.
+       # otherwise see if the default OPT (without [$i]) exists.
+
+       my $o = "$var\[$i\]";
+
+       if (defined($opt{$o})) {
+           $o = $opt{$o};
+           $retval = "$retval$o";
+       } elsif (defined($opt{$var})) {
+           $o = $opt{$var};
+           $retval = "$retval$o";
+       } else {
+           $retval = "$retval\$\{$var\}";
+       }
+
+       $option = $end;
+    }
+
+    $retval = "$retval$option";
+
+    $retval =~ s/^ //;
+
+    return $retval;
+}
+
+sub set_test_option {
+    my ($name, $i) = @_;
+
+    my $option = __set_test_option($name, $i);
+    return $option if (!defined($option));
+
+    my $prev = "";
+
+    # Since an option can evaluate to another option,
+    # keep iterating until we do not evaluate any more
+    # options.
+    my $r = 0;
+    while ($prev ne $option) {
+       # Check for recursive evaluations.
+       # 100 deep should be more than enough.
+       if ($r++ > 100) {
+           die "Over 100 evaluations accurred with $name\n" .
+               "Check for recursive variables\n";
+       }
+       $prev = $option;
+       $option = eval_option($option, $i);
+    }
+
+    return $option;
+}
+
 # First we need to do is the builds
 for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
 
@@ -2003,6 +2144,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
     $poweroff_after_halt = set_test_option("POWEROFF_AFTER_HALT", $i);
     $sleep_time = set_test_option("SLEEP_TIME", $i);
     $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i);
+    $patchcheck_sleep_time = set_test_option("PATCHCHECK_SLEEP_TIME", $i);
     $bisect_manual = set_test_option("BISECT_MANUAL", $i);
     $bisect_skip = set_test_option("BISECT_SKIP", $i);
     $store_failures = set_test_option("STORE_FAILURES", $i);
index 4c5d6bd74a0207b50d8a74ff204270afdb60601a..48cbcc80602aebf2073101fa3033b5522693a88e 100644 (file)
 # ktest will fail to execute, and no tests will run.
 #
 
+#### Config variables ####
+#
+# This config file can also contain "config variables".
+# These are assigned with ":=" instead of the ktest option
+# assignment "=".
+#
+# The difference between ktest options and config variables
+# is that config variables can be used multiple times,
+# where each instance will override the previous instance.
+# And that they only live at time of processing this config.
+#
+# The advantage to config variables is that they can be used
+# by any option or any other config variables to define things
+# that you may use over and over again in the options.
+#
+# For example:
+#
+# USER      := root
+# TARGET    := mybox
+# TEST_CASE := ssh ${USER}@${TARGET} /path/to/my/test
+#
+# TEST_START
+# MIN_CONFIG = config1
+# TEST = ${TEST_CASE}
+#
+# TEST_START
+# MIN_CONFIG = config2
+# TEST = ${TEST_CASE}
+#
+# TEST_CASE := ssh ${USER}@${TARGET} /path/to/my/test2
+#
+# TEST_START
+# MIN_CONFIG = config1
+# TEST = ${TEST_CASE}
+#
+# TEST_START
+# MIN_CONFIG = config2
+# TEST = ${TEST_CASE}
+#
+# TEST_DIR := /home/me/test
+#
+# BUILD_DIR = ${TEST_DIR}/linux.git
+# OUTPUT_DIR = ${TEST_DIR}/test
+#
+# Note, the config variables are evaluated immediately, thus
+# updating TARGET after TEST_CASE has been assigned does nothing
+# to TEST_CASE.
+#
+# As shown in the example, to evaluate a config variable, you
+# use the ${X} convention. Simple $X will not work.
+#
+# If the config variable does not exist, the ${X} will not
+# be evaluated. Thus:
+#
+# MAKE_CMD = PATH=/mypath:${PATH} make
+#
+# If PATH is not a config variable, then the ${PATH} in
+# the MAKE_CMD option will be evaluated by the shell when
+# the MAKE_CMD option is passed into shell processing.
+
+#### Using options in other options ####
+#
+# Options that are defined in the config file may also be used
+# by other options. All options are evaluated at time of
+# use (except that config variables are evaluated at config
+# processing time).
+#
+# If a ktest option is used within another option, instead of
+# typing it again in that option you can simply use the option
+# just like you can config variables.
+#
+# MACHINE = mybox
+#
+# TEST = ssh root@${MACHINE} /path/to/test
+#
+# The option will be used per test case. Thus:
+#
+# TEST_TYPE = test
+# TEST = ssh root@${MACHINE}
+#
+# TEST_START
+# MACHINE = box1
+#
+# TEST_START
+# MACHINE = box2
+#
+# For both test cases, MACHINE will be evaluated at the time
+# of the test case. The first test will run ssh root@box1
+# and the second will run ssh root@box2.
 
 #### Mandatory Default Options ####
 
 # (default 60)
 #BISECT_SLEEP_TIME = 60
 
+# The time in between patch checks to sleep (in seconds)
+# (default 60)
+#PATCHCHECK_SLEEP_TIME = 60
+
 # Reboot the target box on error (default 0)
 #REBOOT_ON_ERROR = 0