Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
author Linus Torvalds <torvalds@woody.osdl.org>
Tue, 12 Dec 2006 16:14:46 +0000 (08:14 -0800)
committer Linus Torvalds <torvalds@woody.osdl.org>
Tue, 12 Dec 2006 16:14:46 +0000 (08:14 -0800)
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6: (29 commits)
  sh: Fixup SH-2 BUG() trap handling.
  sh: Use early_param() for earlyprintk parsing.
  sh: Fix .empty_zero_page alignment for PAGE_SIZE > 4096.
  sh: Fixup .data.page_aligned.
  sh: Hook up SH7722 scif ipr interrupts.
  sh: Fixup sh_bios() trap handling.
  sh: SH-MobileR SH7722 CPU support.
  sh: Fixup dma_cache_sync() callers.
  sh: Convert remaining remap_area_pages() users to ioremap_page_range().
  sh: Fixup kernel_execve() for syscall cleanups.
  sh: Fix get_wchan().
  sh: BUG() handling through trapa vector.
  rtc: rtc-sh: alarm support.
  rtc: rtc-sh: fix rtc for out-by-one for the month.
  sh: Kill off unused SE7619 I/O ops.
  serial: sh-sci: Shut up various sci_rxd_in() gcc4 warnings.
  sh: Split out atomic ops logically.
  sh: Fix Solution Engine 7619 build.
  sh: Trivial build fixes for SH-2 support.
  sh: IPR IRQ updates for SH7619/SH7206.
  ...

119 files changed:
Documentation/networking/dccp.txt
arch/avr32/boards/atstk1000/atstk1002.c
arch/avr32/kernel/avr32_ksyms.c
arch/avr32/kernel/process.c
arch/avr32/kernel/setup.c
arch/avr32/lib/delay.c
arch/avr32/mach-at32ap/at32ap7000.c
arch/avr32/mach-at32ap/extint.c
arch/avr32/mach-at32ap/intc.c
arch/avr32/mach-at32ap/pio.c
arch/avr32/mach-at32ap/sm.c [deleted file]
arch/mips/configs/malta_defconfig
arch/mips/kernel/vmlinux.lds.S
arch/mips/mips-boards/malta/Makefile
arch/mips/mips-boards/malta/malta_setup.c
arch/mips/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/configs/ps3_defconfig
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/module_32.c
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/of_device.c
arch/powerpc/kernel/of_platform.c
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/mm/numa.c
arch/powerpc/platforms/52xx/lite5200.c
arch/powerpc/platforms/cell/cbe_thermal.c
arch/powerpc/platforms/cell/pmu.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/maple/pci.c
arch/powerpc/platforms/maple/setup.c
arch/powerpc/platforms/ps3/Kconfig
arch/powerpc/platforms/pseries/Makefile
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/eeh_driver.c
arch/powerpc/platforms/pseries/hotplug-cpu.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/dcr.S [deleted file]
arch/powerpc/sysdev/qe_lib/qe_ic.c
arch/powerpc/sysdev/rom.c
arch/powerpc/xmon/xmon.c
arch/ppc/kernel/pci.c
block/ll_rw_blk.c
block/scsi_ioctl.c
crypto/sha512.c
drivers/atm/.gitignore
drivers/ide/ide-probe.c
drivers/media/video/usbvision/usbvision-i2c.c
drivers/net/8139too.c
drivers/net/hamradio/baycom_epp.c
drivers/net/wan/Kconfig
drivers/ps3/Makefile
drivers/ps3/vuart.c [new file with mode: 0644]
drivers/ps3/vuart.h [new file with mode: 0644]
fs/jfs/jfs_filsys.h
include/asm-avr32/arch-at32ap/at32ap7000.h [new file with mode: 0644]
include/asm-avr32/arch-at32ap/board.h
include/asm-avr32/arch-at32ap/portmux.h
include/asm-avr32/dma-mapping.h
include/asm-mips/compat.h
include/asm-mips/mach-ip27/irq.h
include/asm-mips/mach-ip27/topology.h
include/asm-mips/sn/arch.h
include/asm-mips/sn/klconfig.h
include/asm-powerpc/Kbuild
include/asm-powerpc/bug.h
include/asm-powerpc/cputable.h
include/asm-powerpc/dcr-native.h
include/asm-powerpc/dcr.h
include/asm-powerpc/hw_irq.h
include/asm-powerpc/module.h
include/asm-powerpc/pci-bridge.h
include/asm-powerpc/pci.h
include/asm-powerpc/reg.h
include/asm-powerpc/rtas.h
include/asm-ppc/pci-bridge.h
include/asm-ppc/pci.h
include/asm-ppc/reg_booke.h
include/linux/blkdev.h
include/linux/dccp.h
include/linux/fsl_devices.h
include/linux/ide.h
include/linux/seqlock.h
include/linux/tfrc.h
include/net/ax25.h
net/ax25/ax25_addr.c
net/core/netpoll.c
net/dccp/ackvec.c
net/dccp/ccid.h
net/dccp/ccids/ccid2.c
net/dccp/ccids/ccid3.c
net/dccp/ccids/ccid3.h
net/dccp/ccids/lib/packet_history.c
net/dccp/ccids/lib/packet_history.h
net/dccp/ccids/lib/tfrc.h
net/dccp/ccids/lib/tfrc_equation.c
net/dccp/dccp.h
net/dccp/feat.c
net/dccp/input.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dccp/options.c
net/dccp/output.c
net/dccp/proto.c
net/dccp/timer.c
net/ipv4/ipvs/ip_vs_sync.c

diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index dda15886bcb51045ad5168dcafb8e0e5048c6301..387482e46c474ed49bcc31081c39528ad1fdbfa4 100644 (file)
@@ -19,7 +19,8 @@ for real time and multimedia traffic.
 
 It has a base protocol and pluggable congestion control IDs (CCIDs).
 
-It is at experimental RFC status and the homepage for DCCP as a protocol is at:
+It is at proposed standard RFC status and the homepage for DCCP as a protocol
+is at:
        http://www.read.cs.ucla.edu/dccp/
 
 Missing features
@@ -34,9 +35,6 @@ The known bugs are at:
 Socket options
 ==============
 
-DCCP_SOCKOPT_PACKET_SIZE is used for CCID3 to set default packet size for
-calculations.
-
 DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
 service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
 the socket will fall back to 0 (which means that no meaningful service code
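
For illustration only, a minimal user-space client could set a service code before connecting roughly as follows. This sketch is not part of the patch: the service value 42 is arbitrary, and the fallback #defines are assumptions for headers that lack the DCCP constants.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP 6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif
#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE 2
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	uint32_t service = htonl(42);	/* arbitrary example service code */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Set the service before connect(); otherwise the socket falls
	 * back to service code 0, as the documentation above notes. */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0)
		perror("setsockopt(DCCP_SOCKOPT_SERVICE)");
	return 0;
}
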
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index cced73c58115f8323d4b19e95451ad76f4038374..32b361f31c2ce443a9c8760bd80ce6eeaade8d44 100644 (file)
@@ -7,20 +7,83 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/types.h>
 
+#include <asm/io.h>
+#include <asm/setup.h>
 #include <asm/arch/board.h>
 #include <asm/arch/init.h>
 
-struct eth_platform_data __initdata eth0_data = {
-       .valid          = 1,
-       .mii_phy_addr   = 0x10,
-       .is_rmii        = 0,
-       .hw_addr        = { 0x6a, 0x87, 0x71, 0x14, 0xcd, 0xcb },
+struct eth_addr {
+       u8 addr[6];
 };
 
+static struct eth_addr __initdata hw_addr[2];
+
+static struct eth_platform_data __initdata eth_data[2];
 extern struct lcdc_platform_data atstk1000_fb0_data;
 
+/*
+ * The next two functions should go away as the boot loader is
+ * supposed to initialize the macb address registers with a valid
+ * ethernet address. But we need to keep it around for a while until
+ * we can be reasonably sure the boot loader does this.
+ *
+ * The phy_id is ignored as the driver will probe for it.
+ */
+static int __init parse_tag_ethernet(struct tag *tag)
+{
+       int i;
+
+       i = tag->u.ethernet.mac_index;
+       if (i < ARRAY_SIZE(hw_addr))
+               memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
+                      sizeof(hw_addr[i].addr));
+
+       return 0;
+}
+__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
+
+static void __init set_hw_addr(struct platform_device *pdev)
+{
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       const u8 *addr;
+       void __iomem *regs;
+       struct clk *pclk;
+
+       if (!res)
+               return;
+       if (pdev->id >= ARRAY_SIZE(hw_addr))
+               return;
+
+       addr = hw_addr[pdev->id].addr;
+       if (!is_valid_ether_addr(addr))
+               return;
+
+       /*
+        * Since this is board-specific code, we'll cheat and use the
+        * physical address directly as we happen to know that it's
+        * the same as the virtual address.
+        */
+       regs = (void __iomem __force *)res->start;
+       pclk = clk_get(&pdev->dev, "pclk");
+       if (!pclk)
+               return;
+
+       clk_enable(pclk);
+       __raw_writel((addr[3] << 24) | (addr[2] << 16)
+                    | (addr[1] << 8) | addr[0], regs + 0x98);
+       __raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
+       clk_disable(pclk);
+       clk_put(pclk);
+}
+
 void __init setup_board(void)
 {
        at32_map_usart(1, 0);   /* /dev/ttyS0 */
@@ -38,7 +101,8 @@ static int __init atstk1002_init(void)
        at32_add_device_usart(1);
        at32_add_device_usart(2);
 
-       at32_add_device_eth(0, &eth0_data);
+       set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
+
        at32_add_device_spi(0);
        at32_add_device_lcdc(0, &atstk1000_fb0_data);
 
diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
index 372e3f8b2417737c38d3be01bcd3990d4f71967e..7c4c76114bbafdaae522246bee5ba9e3c70fd1bc 100644 (file)
@@ -7,12 +7,12 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/module.h>
 
 #include <asm/checksum.h>
 #include <asm/uaccess.h>
-#include <asm/delay.h>
 
 /*
  * GCC functions
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 317dc50945f231896ca079f382eebbfe67201cb4..0b4325946a41629cc7a4755a7c679311e85e688f 100644 (file)
@@ -38,6 +38,13 @@ void cpu_idle(void)
 
 void machine_halt(void)
 {
+       /*
+        * Enter Stop mode. The 32 kHz oscillator will keep running so
+        * the RTC will keep the time properly and the system will
+        * boot quickly.
+        */
+       asm volatile("sleep 3\n\t"
+                    "sub pc, -2");
 }
 
 void machine_power_off(void)
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index ea2d1ffee4780c14ea6237f21090d1cbfd036f65..a34211601008420f456e185b825c9ed426eb51c7 100644 (file)
@@ -229,30 +229,6 @@ static int __init parse_tag_rsvd_mem(struct tag *tag)
 }
 __tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
 
-static int __init parse_tag_ethernet(struct tag *tag)
-{
-#if 0
-       const struct platform_device *pdev;
-
-       /*
-        * We really need a bus type that supports "classes"...this
-        * will do for now (until we must handle other kinds of
-        * ethernet controllers)
-        */
-       pdev = platform_get_device("macb", tag->u.ethernet.mac_index);
-       if (pdev && pdev->dev.platform_data) {
-               struct eth_platform_data *data = pdev->dev.platform_data;
-
-               data->valid = 1;
-               data->mii_phy_addr = tag->u.ethernet.mii_phy_addr;
-               memcpy(data->hw_addr, tag->u.ethernet.hw_address,
-                      sizeof(data->hw_addr));
-       }
-#endif
-       return 0;
-}
-__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
-
 /*
  * Scan the tag table for this tag, and call its parse function. The
  * tag table is built by the linker from all the __tagtable
diff --git a/arch/avr32/lib/delay.c b/arch/avr32/lib/delay.c
index 462c8307b680fa00efd8a98e0010ac5ee991bbd3..b3bc0b56e2c6506849de3ba758aae262f552f477 100644 (file)
@@ -12,9 +12,9 @@
 
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/param.h>
 #include <linux/types.h>
 
-#include <asm/delay.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
 
diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c
index 7ff6ad8bab5faa5132871aa869edeec875dd2001..48f4ef38c70e4a097ead99b9dc8a3ccd8195048f 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <asm/io.h>
 
+#include <asm/arch/at32ap7000.h>
 #include <asm/arch/board.h>
 #include <asm/arch/portmux.h>
 #include <asm/arch/sm.h>
@@ -57,6 +58,9 @@ static struct platform_device _name##_id##_device = {         \
        .num_resources  = ARRAY_SIZE(_name##_id##_resource),    \
 }
 
+#define select_peripheral(pin, periph, flags)                  \
+       at32_select_periph(GPIO_PIN_##pin, GPIO_##periph, flags)
+
 #define DEV_CLK(_name, devname, bus, _index)                   \
 static struct clk devname##_##_name = {                                \
        .name           = #_name,                               \
@@ -67,18 +71,6 @@ static struct clk devname##_##_name = {                              \
        .index          = _index,                               \
 }
 
-enum {
-       PIOA,
-       PIOB,
-       PIOC,
-       PIOD,
-};
-
-enum {
-       FUNC_A,
-       FUNC_B,
-};
-
 unsigned long at32ap7000_osc_rates[3] = {
        [0] = 32768,
        /* FIXME: these are ATSTK1002-specific */
@@ -569,26 +561,26 @@ DEV_CLK(usart, atmel_usart3, pba, 6);
 
 static inline void configure_usart0_pins(void)
 {
-       portmux_set_func(PIOA,  8, FUNC_B);     /* RXD  */
-       portmux_set_func(PIOA,  9, FUNC_B);     /* TXD  */
+       select_peripheral(PA(8),  PERIPH_B, 0); /* RXD  */
+       select_peripheral(PA(9),  PERIPH_B, 0); /* TXD  */
 }
 
 static inline void configure_usart1_pins(void)
 {
-       portmux_set_func(PIOA, 17, FUNC_A);     /* RXD  */
-       portmux_set_func(PIOA, 18, FUNC_A);     /* TXD  */
+       select_peripheral(PA(17), PERIPH_A, 0); /* RXD  */
+       select_peripheral(PA(18), PERIPH_A, 0); /* TXD  */
 }
 
 static inline void configure_usart2_pins(void)
 {
-       portmux_set_func(PIOB, 26, FUNC_B);     /* RXD  */
-       portmux_set_func(PIOB, 27, FUNC_B);     /* TXD  */
+       select_peripheral(PB(26), PERIPH_B, 0); /* RXD  */
+       select_peripheral(PB(27), PERIPH_B, 0); /* TXD  */
 }
 
 static inline void configure_usart3_pins(void)
 {
-       portmux_set_func(PIOB, 18, FUNC_B);     /* RXD  */
-       portmux_set_func(PIOB, 17, FUNC_B);     /* TXD  */
+       select_peripheral(PB(18), PERIPH_B, 0); /* RXD  */
+       select_peripheral(PB(17), PERIPH_B, 0); /* TXD  */
 }
 
 static struct platform_device *at32_usarts[4];
@@ -654,6 +646,15 @@ DEFINE_DEV_DATA(macb, 0);
 DEV_CLK(hclk, macb0, hsb, 8);
 DEV_CLK(pclk, macb0, pbb, 6);
 
+static struct eth_platform_data macb1_data;
+static struct resource macb1_resource[] = {
+       PBMEM(0xfff01c00),
+       IRQ(26),
+};
+DEFINE_DEV_DATA(macb, 1);
+DEV_CLK(hclk, macb1, hsb, 9);
+DEV_CLK(pclk, macb1, pbb, 7);
+
 struct platform_device *__init
 at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
 {
@@ -663,27 +664,54 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
        case 0:
                pdev = &macb0_device;
 
-               portmux_set_func(PIOC,  3, FUNC_A);     /* TXD0 */
-               portmux_set_func(PIOC,  4, FUNC_A);     /* TXD1 */
-               portmux_set_func(PIOC,  7, FUNC_A);     /* TXEN */
-               portmux_set_func(PIOC,  8, FUNC_A);     /* TXCK */
-               portmux_set_func(PIOC,  9, FUNC_A);     /* RXD0 */
-               portmux_set_func(PIOC, 10, FUNC_A);     /* RXD1 */
-               portmux_set_func(PIOC, 13, FUNC_A);     /* RXER */
-               portmux_set_func(PIOC, 15, FUNC_A);     /* RXDV */
-               portmux_set_func(PIOC, 16, FUNC_A);     /* MDC  */
-               portmux_set_func(PIOC, 17, FUNC_A);     /* MDIO */
+               select_peripheral(PC(3),  PERIPH_A, 0); /* TXD0 */
+               select_peripheral(PC(4),  PERIPH_A, 0); /* TXD1 */
+               select_peripheral(PC(7),  PERIPH_A, 0); /* TXEN */
+               select_peripheral(PC(8),  PERIPH_A, 0); /* TXCK */
+               select_peripheral(PC(9),  PERIPH_A, 0); /* RXD0 */
+               select_peripheral(PC(10), PERIPH_A, 0); /* RXD1 */
+               select_peripheral(PC(13), PERIPH_A, 0); /* RXER */
+               select_peripheral(PC(15), PERIPH_A, 0); /* RXDV */
+               select_peripheral(PC(16), PERIPH_A, 0); /* MDC  */
+               select_peripheral(PC(17), PERIPH_A, 0); /* MDIO */
+
+               if (!data->is_rmii) {
+                       select_peripheral(PC(0),  PERIPH_A, 0); /* COL  */
+                       select_peripheral(PC(1),  PERIPH_A, 0); /* CRS  */
+                       select_peripheral(PC(2),  PERIPH_A, 0); /* TXER */
+                       select_peripheral(PC(5),  PERIPH_A, 0); /* TXD2 */
+                       select_peripheral(PC(6),  PERIPH_A, 0); /* TXD3 */
+                       select_peripheral(PC(11), PERIPH_A, 0); /* RXD2 */
+                       select_peripheral(PC(12), PERIPH_A, 0); /* RXD3 */
+                       select_peripheral(PC(14), PERIPH_A, 0); /* RXCK */
+                       select_peripheral(PC(18), PERIPH_A, 0); /* SPD  */
+               }
+               break;
+
+       case 1:
+               pdev = &macb1_device;
+
+               select_peripheral(PD(13), PERIPH_B, 0);         /* TXD0 */
+               select_peripheral(PD(14), PERIPH_B, 0);         /* TXD1 */
+               select_peripheral(PD(11), PERIPH_B, 0);         /* TXEN */
+               select_peripheral(PD(12), PERIPH_B, 0);         /* TXCK */
+               select_peripheral(PD(10), PERIPH_B, 0);         /* RXD0 */
+               select_peripheral(PD(6),  PERIPH_B, 0);         /* RXD1 */
+               select_peripheral(PD(5),  PERIPH_B, 0);         /* RXER */
+               select_peripheral(PD(4),  PERIPH_B, 0);         /* RXDV */
+               select_peripheral(PD(3),  PERIPH_B, 0);         /* MDC  */
+               select_peripheral(PD(2),  PERIPH_B, 0);         /* MDIO */
 
                if (!data->is_rmii) {
-                       portmux_set_func(PIOC,  0, FUNC_A);     /* COL  */
-                       portmux_set_func(PIOC,  1, FUNC_A);     /* CRS  */
-                       portmux_set_func(PIOC,  2, FUNC_A);     /* TXER */
-                       portmux_set_func(PIOC,  5, FUNC_A);     /* TXD2 */
-                       portmux_set_func(PIOC,  6, FUNC_A);     /* TXD3 */
-                       portmux_set_func(PIOC, 11, FUNC_A);     /* RXD2 */
-                       portmux_set_func(PIOC, 12, FUNC_A);     /* RXD3 */
-                       portmux_set_func(PIOC, 14, FUNC_A);     /* RXCK */
-                       portmux_set_func(PIOC, 18, FUNC_A);     /* SPD  */
+                       select_peripheral(PC(19), PERIPH_B, 0); /* COL  */
+                       select_peripheral(PC(23), PERIPH_B, 0); /* CRS  */
+                       select_peripheral(PC(26), PERIPH_B, 0); /* TXER */
+                       select_peripheral(PC(27), PERIPH_B, 0); /* TXD2 */
+                       select_peripheral(PC(28), PERIPH_B, 0); /* TXD3 */
+                       select_peripheral(PC(29), PERIPH_B, 0); /* RXD2 */
+                       select_peripheral(PC(30), PERIPH_B, 0); /* RXD3 */
+                       select_peripheral(PC(24), PERIPH_B, 0); /* RXCK */
+                       select_peripheral(PD(15), PERIPH_B, 0); /* SPD  */
                }
                break;
 
@@ -714,12 +742,12 @@ struct platform_device *__init at32_add_device_spi(unsigned int id)
        switch (id) {
        case 0:
                pdev = &spi0_device;
-               portmux_set_func(PIOA,  0, FUNC_A);     /* MISO  */
-               portmux_set_func(PIOA,  1, FUNC_A);     /* MOSI  */
-               portmux_set_func(PIOA,  2, FUNC_A);     /* SCK   */
-               portmux_set_func(PIOA,  3, FUNC_A);     /* NPCS0 */
-               portmux_set_func(PIOA,  4, FUNC_A);     /* NPCS1 */
-               portmux_set_func(PIOA,  5, FUNC_A);     /* NPCS2 */
+               select_peripheral(PA(0),  PERIPH_A, 0); /* MISO  */
+               select_peripheral(PA(1),  PERIPH_A, 0); /* MOSI  */
+               select_peripheral(PA(2),  PERIPH_A, 0); /* SCK   */
+               select_peripheral(PA(3),  PERIPH_A, 0); /* NPCS0 */
+               select_peripheral(PA(4),  PERIPH_A, 0); /* NPCS1 */
+               select_peripheral(PA(5),  PERIPH_A, 0); /* NPCS2 */
                break;
 
        default:
@@ -762,37 +790,37 @@ at32_add_device_lcdc(unsigned int id, struct lcdc_platform_data *data)
        switch (id) {
        case 0:
                pdev = &lcdc0_device;
-               portmux_set_func(PIOC, 19, FUNC_A);     /* CC     */
-               portmux_set_func(PIOC, 20, FUNC_A);     /* HSYNC  */
-               portmux_set_func(PIOC, 21, FUNC_A);     /* PCLK   */
-               portmux_set_func(PIOC, 22, FUNC_A);     /* VSYNC  */
-               portmux_set_func(PIOC, 23, FUNC_A);     /* DVAL   */
-               portmux_set_func(PIOC, 24, FUNC_A);     /* MODE   */
-               portmux_set_func(PIOC, 25, FUNC_A);     /* PWR    */
-               portmux_set_func(PIOC, 26, FUNC_A);     /* DATA0  */
-               portmux_set_func(PIOC, 27, FUNC_A);     /* DATA1  */
-               portmux_set_func(PIOC, 28, FUNC_A);     /* DATA2  */
-               portmux_set_func(PIOC, 29, FUNC_A);     /* DATA3  */
-               portmux_set_func(PIOC, 30, FUNC_A);     /* DATA4  */
-               portmux_set_func(PIOC, 31, FUNC_A);     /* DATA5  */
-               portmux_set_func(PIOD,  0, FUNC_A);     /* DATA6  */
-               portmux_set_func(PIOD,  1, FUNC_A);     /* DATA7  */
-               portmux_set_func(PIOD,  2, FUNC_A);     /* DATA8  */
-               portmux_set_func(PIOD,  3, FUNC_A);     /* DATA9  */
-               portmux_set_func(PIOD,  4, FUNC_A);     /* DATA10 */
-               portmux_set_func(PIOD,  5, FUNC_A);     /* DATA11 */
-               portmux_set_func(PIOD,  6, FUNC_A);     /* DATA12 */
-               portmux_set_func(PIOD,  7, FUNC_A);     /* DATA13 */
-               portmux_set_func(PIOD,  8, FUNC_A);     /* DATA14 */
-               portmux_set_func(PIOD,  9, FUNC_A);     /* DATA15 */
-               portmux_set_func(PIOD, 10, FUNC_A);     /* DATA16 */
-               portmux_set_func(PIOD, 11, FUNC_A);     /* DATA17 */
-               portmux_set_func(PIOD, 12, FUNC_A);     /* DATA18 */
-               portmux_set_func(PIOD, 13, FUNC_A);     /* DATA19 */
-               portmux_set_func(PIOD, 14, FUNC_A);     /* DATA20 */
-               portmux_set_func(PIOD, 15, FUNC_A);     /* DATA21 */
-               portmux_set_func(PIOD, 16, FUNC_A);     /* DATA22 */
-               portmux_set_func(PIOD, 17, FUNC_A);     /* DATA23 */
+               select_peripheral(PC(19), PERIPH_A, 0); /* CC     */
+               select_peripheral(PC(20), PERIPH_A, 0); /* HSYNC  */
+               select_peripheral(PC(21), PERIPH_A, 0); /* PCLK   */
+               select_peripheral(PC(22), PERIPH_A, 0); /* VSYNC  */
+               select_peripheral(PC(23), PERIPH_A, 0); /* DVAL   */
+               select_peripheral(PC(24), PERIPH_A, 0); /* MODE   */
+               select_peripheral(PC(25), PERIPH_A, 0); /* PWR    */
+               select_peripheral(PC(26), PERIPH_A, 0); /* DATA0  */
+               select_peripheral(PC(27), PERIPH_A, 0); /* DATA1  */
+               select_peripheral(PC(28), PERIPH_A, 0); /* DATA2  */
+               select_peripheral(PC(29), PERIPH_A, 0); /* DATA3  */
+               select_peripheral(PC(30), PERIPH_A, 0); /* DATA4  */
+               select_peripheral(PC(31), PERIPH_A, 0); /* DATA5  */
+               select_peripheral(PD(0),  PERIPH_A, 0); /* DATA6  */
+               select_peripheral(PD(1),  PERIPH_A, 0); /* DATA7  */
+               select_peripheral(PD(2),  PERIPH_A, 0); /* DATA8  */
+               select_peripheral(PD(3),  PERIPH_A, 0); /* DATA9  */
+               select_peripheral(PD(4),  PERIPH_A, 0); /* DATA10 */
+               select_peripheral(PD(5),  PERIPH_A, 0); /* DATA11 */
+               select_peripheral(PD(6),  PERIPH_A, 0); /* DATA12 */
+               select_peripheral(PD(7),  PERIPH_A, 0); /* DATA13 */
+               select_peripheral(PD(8),  PERIPH_A, 0); /* DATA14 */
+               select_peripheral(PD(9),  PERIPH_A, 0); /* DATA15 */
+               select_peripheral(PD(10), PERIPH_A, 0); /* DATA16 */
+               select_peripheral(PD(11), PERIPH_A, 0); /* DATA17 */
+               select_peripheral(PD(12), PERIPH_A, 0); /* DATA18 */
+               select_peripheral(PD(13), PERIPH_A, 0); /* DATA19 */
+               select_peripheral(PD(14), PERIPH_A, 0); /* DATA20 */
+               select_peripheral(PD(15), PERIPH_A, 0); /* DATA21 */
+               select_peripheral(PD(16), PERIPH_A, 0); /* DATA22 */
+               select_peripheral(PD(17), PERIPH_A, 0); /* DATA23 */
 
                clk_set_parent(&lcdc0_pixclk, &pll0);
                clk_set_rate(&lcdc0_pixclk, clk_get_rate(&pll0));
@@ -838,6 +866,8 @@ struct clk *at32_clock_list[] = {
        &atmel_usart3_usart,
        &macb0_hclk,
        &macb0_pclk,
+       &macb1_hclk,
+       &macb1_pclk,
        &spi0_mck,
        &lcdc0_hclk,
        &lcdc0_pixclk,
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 4dff1f98890039f0d96fcbc54e27c76322e3e383..b59272e81b9a049be31be3105d23354742d11388 100644 (file)
@@ -49,12 +49,25 @@ static void eim_unmask_irq(unsigned int irq)
 static int eim_set_irq_type(unsigned int irq, unsigned int flow_type)
 {
        struct at32_sm *sm = get_irq_chip_data(irq);
+       struct irq_desc *desc;
        unsigned int i = irq - sm->eim_first_irq;
        u32 mode, edge, level;
        unsigned long flags;
        int ret = 0;
 
-       flow_type &= IRQ_TYPE_SENSE_MASK;
+       if (flow_type == IRQ_TYPE_NONE)
+               flow_type = IRQ_TYPE_LEVEL_LOW;
+
+       desc = &irq_desc[irq];
+       desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
+       desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
+
+       if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
+               desc->status |= IRQ_LEVEL;
+               set_irq_handler(irq, handle_level_irq);
+       } else {
+               set_irq_handler(irq, handle_edge_irq);
+       }
 
        spin_lock_irqsave(&sm->lock, flags);
 
@@ -148,10 +161,15 @@ static int __init eim_init(void)
        pattern = sm_readl(sm, EIM_MODE);
        nr_irqs = fls(pattern);
 
+       /* Trigger on falling edge unless overridden by driver */
+       sm_writel(sm, EIM_MODE, 0UL);
+       sm_writel(sm, EIM_EDGE, 0UL);
+
        sm->eim_chip = &eim_chip;
 
        for (i = 0; i < nr_irqs; i++) {
-               set_irq_chip(sm->eim_first_irq + i, &eim_chip);
+               set_irq_chip_and_handler(sm->eim_first_irq + i, &eim_chip,
+                                        handle_edge_irq);
                set_irq_chip_data(sm->eim_first_irq + i, sm);
        }
 
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c
index eb87a18ad7b2f70c6a49a7595efeca4ac23706b1..dd5c009cf224838379af65d498c35ed7357afcf2 100644 (file)
@@ -136,3 +136,7 @@ fail:
        panic("Interrupt controller initialization failed!\n");
 }
 
+unsigned long intc_get_pending(int group)
+{
+       return intc_readl(&intc0, INTREQ0 + 4 * group);
+}
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index d3aabfca85987c35248e87d632827394bae9e70f..f1280ed8ed6dcb6a7b48fd5418d3c116e3bb7794 100644 (file)
@@ -25,27 +25,98 @@ struct pio_device {
        void __iomem *regs;
        const struct platform_device *pdev;
        struct clk *clk;
-       u32 alloc_mask;
+       u32 pinmux_mask;
        char name[32];
 };
 
 static struct pio_device pio_dev[MAX_NR_PIO_DEVICES];
 
-void portmux_set_func(unsigned int portmux_id, unsigned int pin_id,
-                     unsigned int function_id)
+static struct pio_device *gpio_to_pio(unsigned int gpio)
 {
        struct pio_device *pio;
-       u32 mask = 1 << pin_id;
+       unsigned int index;
 
-       BUG_ON(portmux_id >= MAX_NR_PIO_DEVICES);
+       index = gpio >> 5;
+       if (index >= MAX_NR_PIO_DEVICES)
+               return NULL;
+       pio = &pio_dev[index];
+       if (!pio->regs)
+               return NULL;
 
-       pio = &pio_dev[portmux_id];
+       return pio;
+}
+
+/* Pin multiplexing API */
+
+void __init at32_select_periph(unsigned int pin, unsigned int periph,
+                              unsigned long flags)
+{
+       struct pio_device *pio;
+       unsigned int pin_index = pin & 0x1f;
+       u32 mask = 1 << pin_index;
+
+       pio = gpio_to_pio(pin);
+       if (unlikely(!pio)) {
+               printk("pio: invalid pin %u\n", pin);
+               goto fail;
+       }
 
-       if (function_id)
+       if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask))) {
+               printk("%s: pin %u is busy\n", pio->name, pin_index);
+               goto fail;
+       }
+
+       pio_writel(pio, PUER, mask);
+       if (periph)
                pio_writel(pio, BSR, mask);
        else
                pio_writel(pio, ASR, mask);
+
        pio_writel(pio, PDR, mask);
+       if (!(flags & AT32_GPIOF_PULLUP))
+               pio_writel(pio, PUDR, mask);
+
+       return;
+
+fail:
+       dump_stack();
+}
+
+void __init at32_select_gpio(unsigned int pin, unsigned long flags)
+{
+       struct pio_device *pio;
+       unsigned int pin_index = pin & 0x1f;
+       u32 mask = 1 << pin_index;
+
+       pio = gpio_to_pio(pin);
+       if (unlikely(!pio)) {
+               printk("pio: invalid pin %u\n", pin);
+               goto fail;
+       }
+
+       if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask))) {
+               printk("%s: pin %u is busy\n", pio->name, pin_index);
+               goto fail;
+       }
+
+       pio_writel(pio, PUER, mask);
+       if (flags & AT32_GPIOF_HIGH)
+               pio_writel(pio, SODR, mask);
+       else
+               pio_writel(pio, CODR, mask);
+       if (flags & AT32_GPIOF_OUTPUT)
+               pio_writel(pio, OER, mask);
+       else
+               pio_writel(pio, ODR, mask);
+
+       pio_writel(pio, PER, mask);
+       if (!(flags & AT32_GPIOF_PULLUP))
+               pio_writel(pio, PUDR, mask);
+
+       return;
+
+fail:
+       dump_stack();
 }
 
 static int __init pio_probe(struct platform_device *pdev)
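
The functions above form the board-level pin-mux API that replaces portmux_set_func(): a pin is now a single number whose upper bits select the PIO port and whose low five bits select the pin within that port, as gpio_to_pio() shows. A hedged sketch of board-code usage follows; the GPIO_PIN_*() and GPIO_PERIPH_* names are those implied by the select_peripheral() macro in at32ap7000.c, and the header choices and the PB(30) pin are assumptions rather than details of this commit.

#include <linux/init.h>

#include <asm/arch/at32ap7000.h>	/* GPIO_PIN_*() pin numbering (assumed location) */
#include <asm/arch/portmux.h>		/* at32_select_periph/gpio, AT32_GPIOF_* (assumed location) */

static void __init example_board_setup_pins(void)
{
	/* Route USART1 RXD/TXD to peripheral function A with no pull-up,
	 * as configure_usart1_pins() does via select_peripheral(). */
	at32_select_periph(GPIO_PIN_PA(17), GPIO_PERIPH_A, 0);
	at32_select_periph(GPIO_PIN_PA(18), GPIO_PERIPH_A, 0);

	/* Claim an illustrative pin as a plain GPIO output driven high,
	 * e.g. for a board LED. */
	at32_select_gpio(GPIO_PIN_PB(30), AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
}
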
diff --git a/arch/avr32/mach-at32ap/sm.c b/arch/avr32/mach-at32ap/sm.c
deleted file mode 100644 (file)
index 03306eb..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * System Manager driver for AT32AP CPUs
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/random.h>
-#include <linux/spinlock.h>
-
-#include <asm/intc.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include <asm/arch/sm.h>
-
-#include "sm.h"
-
-#define SM_EIM_IRQ_RESOURCE    1
-#define SM_PM_IRQ_RESOURCE     2
-#define SM_RTC_IRQ_RESOURCE    3
-
-#define to_eim(irqc) container_of(irqc, struct at32_sm, irqc)
-
-struct at32_sm system_manager;
-
-int __init at32_sm_init(void)
-{
-       struct resource *regs;
-       struct at32_sm *sm = &system_manager;
-       int ret = -ENXIO;
-
-       regs = platform_get_resource(&at32_sm_device, IORESOURCE_MEM, 0);
-       if (!regs)
-               goto fail;
-
-       spin_lock_init(&sm->lock);
-       sm->pdev = &at32_sm_device;
-
-       ret = -ENOMEM;
-       sm->regs = ioremap(regs->start, regs->end - regs->start + 1);
-       if (!sm->regs)
-               goto fail;
-
-       return 0;
-
-fail:
-       printk(KERN_ERR "Failed to initialize System Manager: %d\n", ret);
-       return ret;
-}
-
-/*
- * External Interrupt Module (EIM).
- *
- * EIM gets level- or edge-triggered interrupts of either polarity
- * from the outside and converts it to active-high level-triggered
- * interrupts that the internal interrupt controller can handle. EIM
- * also provides masking/unmasking of interrupts, as well as
- * acknowledging of edge-triggered interrupts.
- */
-
-static irqreturn_t spurious_eim_interrupt(int irq, void *dev_id,
-                                         struct pt_regs *regs)
-{
-       printk(KERN_WARNING "Spurious EIM interrupt %d\n", irq);
-       disable_irq(irq);
-       return IRQ_NONE;
-}
-
-static struct irqaction eim_spurious_action = {
-       .handler = spurious_eim_interrupt,
-};
-
-static irqreturn_t eim_handle_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
-       struct irq_controller * irqc = dev_id;
-       struct at32_sm *sm = to_eim(irqc);
-       unsigned long pending;
-
-       /*
-        * No need to disable interrupts globally.  The interrupt
-        * level relevant to this group must be masked all the time,
-        * so we know that this particular EIM instance will not be
-        * re-entered.
-        */
-       spin_lock(&sm->lock);
-
-       pending = intc_get_pending(sm->irqc.irq_group);
-       if (unlikely(!pending)) {
-               printk(KERN_ERR "EIM (group %u): No interrupts pending!\n",
-                      sm->irqc.irq_group);
-               goto unlock;
-       }
-
-       do {
-               struct irqaction *action;
-               unsigned int i;
-
-               i = fls(pending) - 1;
-               pending &= ~(1 << i);
-               action = sm->action[i];
-
-               /* Acknowledge the interrupt */
-               sm_writel(sm, EIM_ICR, 1 << i);
-
-               spin_unlock(&sm->lock);
-
-               if (action->flags & SA_INTERRUPT)
-                       local_irq_disable();
-               action->handler(sm->irqc.first_irq + i, action->dev_id, regs);
-               local_irq_enable();
-               spin_lock(&sm->lock);
-               if (action->flags & SA_SAMPLE_RANDOM)
-                       add_interrupt_randomness(sm->irqc.first_irq + i);
-       } while (pending);
-
-unlock:
-       spin_unlock(&sm->lock);
-       return IRQ_HANDLED;
-}
-
-static void eim_mask(struct irq_controller *irqc, unsigned int irq)
-{
-       struct at32_sm *sm = to_eim(irqc);
-       unsigned int i;
-
-       i = irq - sm->irqc.first_irq;
-       sm_writel(sm, EIM_IDR, 1 << i);
-}
-
-static void eim_unmask(struct irq_controller *irqc, unsigned int irq)
-{
-       struct at32_sm *sm = to_eim(irqc);
-       unsigned int i;
-
-       i = irq - sm->irqc.first_irq;
-       sm_writel(sm, EIM_IER, 1 << i);
-}
-
-static int eim_setup(struct irq_controller *irqc, unsigned int irq,
-               struct irqaction *action)
-{
-       struct at32_sm *sm = to_eim(irqc);
-       sm->action[irq - sm->irqc.first_irq] = action;
-       /* Acknowledge earlier interrupts */
-       sm_writel(sm, EIM_ICR, (1<<(irq - sm->irqc.first_irq)));
-       eim_unmask(irqc, irq);
-       return 0;
-}
-
-static void eim_free(struct irq_controller *irqc, unsigned int irq,
-               void *dev)
-{
-       struct at32_sm *sm = to_eim(irqc);
-       eim_mask(irqc, irq);
-       sm->action[irq - sm->irqc.first_irq] = &eim_spurious_action;
-}
-
-static int eim_set_type(struct irq_controller *irqc, unsigned int irq,
-                       unsigned int type)
-{
-       struct at32_sm *sm = to_eim(irqc);
-       unsigned long flags;
-       u32 value, pattern;
-
-       spin_lock_irqsave(&sm->lock, flags);
-
-       pattern = 1 << (irq - sm->irqc.first_irq);
-
-       value = sm_readl(sm, EIM_MODE);
-       if (type & IRQ_TYPE_LEVEL)
-               value |= pattern;
-       else
-               value &= ~pattern;
-       sm_writel(sm, EIM_MODE, value);
-       value = sm_readl(sm, EIM_EDGE);
-       if (type & IRQ_EDGE_RISING)
-               value |= pattern;
-       else
-               value &= ~pattern;
-       sm_writel(sm, EIM_EDGE, value);
-       value = sm_readl(sm, EIM_LEVEL);
-       if (type & IRQ_LEVEL_HIGH)
-               value |= pattern;
-       else
-               value &= ~pattern;
-       sm_writel(sm, EIM_LEVEL, value);
-
-       spin_unlock_irqrestore(&sm->lock, flags);
-
-       return 0;
-}
-
-static unsigned int eim_get_type(struct irq_controller *irqc,
-                                unsigned int irq)
-{
-       struct at32_sm *sm = to_eim(irqc);
-       unsigned long flags;
-       unsigned int type = 0;
-       u32 mode, edge, level, pattern;
-
-       pattern = 1 << (irq - sm->irqc.first_irq);
-
-       spin_lock_irqsave(&sm->lock, flags);
-       mode = sm_readl(sm, EIM_MODE);
-       edge = sm_readl(sm, EIM_EDGE);
-       level = sm_readl(sm, EIM_LEVEL);
-       spin_unlock_irqrestore(&sm->lock, flags);
-
-       if (mode & pattern)
-               type |= IRQ_TYPE_LEVEL;
-       if (edge & pattern)
-               type |= IRQ_EDGE_RISING;
-       if (level & pattern)
-               type |= IRQ_LEVEL_HIGH;
-
-       return type;
-}
-
-static struct irq_controller_class eim_irq_class = {
-       .typename       = "EIM",
-       .handle         = eim_handle_irq,
-       .setup          = eim_setup,
-       .free           = eim_free,
-       .mask           = eim_mask,
-       .unmask         = eim_unmask,
-       .set_type       = eim_set_type,
-       .get_type       = eim_get_type,
-};
-
-static int __init eim_init(void)
-{
-       struct at32_sm *sm = &system_manager;
-       unsigned int i;
-       u32 pattern;
-       int ret;
-
-       /*
-        * The EIM is really the same module as SM, so register
-        * mapping, etc. has been taken care of already.
-        */
-
-       /*
-        * Find out how many interrupt lines that are actually
-        * implemented in hardware.
-        */
-       sm_writel(sm, EIM_IDR, ~0UL);
-       sm_writel(sm, EIM_MODE, ~0UL);
-       pattern = sm_readl(sm, EIM_MODE);
-       sm->irqc.nr_irqs = fls(pattern);
-
-       ret = -ENOMEM;
-       sm->action = kmalloc(sizeof(*sm->action) * sm->irqc.nr_irqs,
-                            GFP_KERNEL);
-       if (!sm->action)
-               goto out;
-
-       for (i = 0; i < sm->irqc.nr_irqs; i++)
-               sm->action[i] = &eim_spurious_action;
-
-       spin_lock_init(&sm->lock);
-       sm->irqc.irq_group = sm->pdev->resource[SM_EIM_IRQ_RESOURCE].start;
-       sm->irqc.class = &eim_irq_class;
-
-       ret = intc_register_controller(&sm->irqc);
-       if (ret < 0)
-               goto out_free_actions;
-
-       printk("EIM: External Interrupt Module at 0x%p, IRQ group %u\n",
-              sm->regs, sm->irqc.irq_group);
-       printk("EIM: Handling %u external IRQs, starting with IRQ%u\n",
-              sm->irqc.nr_irqs, sm->irqc.first_irq);
-
-       return 0;
-
-out_free_actions:
-       kfree(sm->action);
-out:
-       return ret;
-}
-arch_initcall(eim_init);
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 1f9300f37f52f36528da5f07b3c6a1c797e89a63..96e941084c04f1fa2beae8d912e8381b062a0c5a 100644 (file)
@@ -644,7 +644,85 @@ CONFIG_CONNECTOR=m
 #
 # Memory Technology Devices (MTD)
 #
-# CONFIG_MTD is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x0
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=0
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
 
 #
 # Parallel port support
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 2f4508f55fcaf2673ecceaa7f061177825c5dcac..cecff24cc9721a16e7abdb9e04980f73b53293ec 100644 (file)
@@ -109,6 +109,10 @@ SECTIONS
   .con_initcall.init : { *(.con_initcall.init) }
   __con_initcall_end = .;
   SECURITY_INIT
+    /* .exit.text is discarded at runtime, not link time, to deal with
+     references from .rodata */
+  .exit.text : { *(.exit.text) }
+  .exit.data : { *(.exit.data) }
   . = ALIGN(_PAGE_SIZE);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
@@ -136,8 +140,6 @@ SECTIONS
 
   /* Sections to be discarded */
   /DISCARD/ : {
-       *(.exit.text)
-        *(.exit.data)
         *(.exitcall.exit)
 
        /* ABI crap starts here */
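
The comment added to the linker script above is terse: the problem it describes is a reference recorded in a kept section such as .rodata that points into .exit.text, which would become a reference into a discarded section if .exit.text were dropped at link time. A minimal kernel-style illustration of such a reference, with names invented purely for the example:

#include <linux/init.h>

static void __exit example_teardown(void)
{
}

/*
 * 'const' places this table in .rodata, and its function pointer refers
 * into .exit.text.  Discarding .exit.text in the linker script would make
 * this a reference into a discarded section; keeping the section and
 * dropping it at runtime, as the change above does, avoids that.
 */
static const struct example_ops {
	void (*teardown)(void);
} example_ops __attribute__((__used__)) = {
	.teardown = example_teardown,
};
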
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile
index 77ee5c6d33c119964da2619f5484cf83772e3b49..b662c75fb28e1080a6667f29c2a5775f774ec23e 100644 (file)
@@ -19,5 +19,5 @@
 # under Linux.
 #
 
-obj-y := malta_int.o malta_setup.o
+obj-y := malta_int.o malta_mtd.o malta_setup.o
 obj-$(CONFIG_SMP) += malta_smp.o
diff --git a/arch/mips/mips-boards/malta/malta_setup.c b/arch/mips/mips-boards/malta/malta_setup.c
index 282f3e52eea3622eb1d00fd9ca710ac8e41cbda4..56ea76679cd41d816313f0a1eec1b13c19a5900e 100644 (file)
 #include <linux/pci.h>
 #include <linux/screen_info.h>
 
-#ifdef CONFIG_MTD
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#endif
-
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
@@ -58,30 +51,6 @@ struct resource standard_io_resources[] = {
        { .name = "dma2", .start = 0xc0, .end = 0xdf, .flags = IORESOURCE_BUSY },
 };
 
-#ifdef CONFIG_MTD
-static struct mtd_partition malta_mtd_partitions[] = {
-       {
-               .name =         "YAMON",
-               .offset =       0x0,
-               .size =         0x100000,
-               .mask_flags =   MTD_WRITEABLE
-       },
-       {
-               .name =         "User FS",
-               .offset =       0x100000,
-               .size =         0x2e0000
-       },
-       {
-               .name =         "Board Config",
-               .offset =       0x3e0000,
-               .size =         0x020000,
-               .mask_flags =   MTD_WRITEABLE
-       }
-};
-
-#define number_partitions      (sizeof(malta_mtd_partitions)/sizeof(struct mtd_partition))
-#endif
-
 const char *get_system_type(void)
 {
        return "MIPS Malta";
@@ -211,14 +180,6 @@ void __init plat_mem_setup(void)
 #endif
 #endif
 
-#ifdef CONFIG_MTD
-       /*
-        * Support for MTD on Malta. Use the generic physmap driver
-        */
-       physmap_configure(0x1e000000, 0x400000, 4, NULL);
-       physmap_set_partitions(malta_mtd_partitions, number_partitions);
-#endif
-
        mips_reboot_setup();
 
        board_time_init = mips_time_init;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9e29ba9205f0c187925edd51214849f4567d89f5..ea2d15370bb72ecef36f99656049b092073bff9c 100644 (file)
@@ -316,7 +316,7 @@ static int __init page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-       unsigned long max_dma, high, low;
+       unsigned long max_dma, low;
 #ifndef CONFIG_FLATMEM
        unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
        unsigned long i, j, pfn;
@@ -331,7 +331,6 @@ void __init paging_init(void)
 
        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;
-       high = highend_pfn;
 
 #ifdef CONFIG_ISA
        if (low < max_dma)
@@ -344,13 +343,13 @@ void __init paging_init(void)
        zones_size[ZONE_DMA] = low;
 #endif
 #ifdef CONFIG_HIGHMEM
-       if (cpu_has_dc_aliases) {
-               printk(KERN_WARNING "This processor doesn't support highmem.");
-               if (high - low)
-                       printk(" %ldk highmem ignored", high - low);
-               printk("\n");
-       } else
-               zones_size[ZONE_HIGHMEM] = high - low;
+       zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
+
+       if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
+               printk(KERN_WARNING "This processor doesn't support highmem."
+                      " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
+               zones_size[ZONE_HIGHMEM] = 0;
+       }
 #endif
 
 #ifdef CONFIG_FLATMEM
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 56c3c4065eb0c354fab33618ded153b43a1648b7..8699dadcd0966028ed7207b333486b2295a4fc12 100644 (file)
@@ -107,6 +107,11 @@ config AUDIT_ARCH
        bool
        default y
 
+config GENERIC_BUG
+       bool
+       default y
+       depends on BUG
+
 config DEFAULT_UIMAGE
        bool
        help
@@ -478,6 +483,7 @@ config PPC_MAPLE
        select PPC_UDBG_16550
        select PPC_970_NAP
        select PPC_NATIVE
+       select PPC_RTAS
        default n
        help
           This option enables support for the Maple 970FX Evaluation Board.
@@ -714,7 +720,7 @@ config FORCE_MAX_ZONEORDER
 
 config MATH_EMULATION
        bool "Math emulation"
-       depends on 4xx || 8xx || E200 || E500
+       depends on 4xx || 8xx || E200 || PPC_83xx || E500
        ---help---
          Some PowerPC chips designed for embedded applications do not have
          a floating-point unit and therefore do not implement the
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index f2d888e014a9f81cbf296ec2ca8af3bf8ef570d9..70ed61337f5c03ad20b3775055c2baf4a99b0466 100644 (file)
@@ -157,6 +157,7 @@ CONFIG_SPU_BASE=y
 CONFIG_PS3_HTAB_SIZE=20
 CONFIG_PS3_DYNAMIC_DMA=y
 CONFIG_PS3_USE_LPAR_ADDR=y
+CONFIG_PS3_VUART=y
 
 #
 # Kernel options
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4fe53d08ab817e79a4810b1492e3a5e4bff02661..d2ded19e40643f4b7bafdaa28ba89ad0b443462d 100644 (file)
@@ -77,6 +77,7 @@ endif
 
 ifeq ($(CONFIG_PPC_ISERIES),y)
 extra-y += lparmap.s
+$(obj)/head_64.o:      $(obj)/lparmap.s
 AFLAGS_head_64.o += -I$(obj)
 endif
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9d1614c3ce67d047adf9573dcb40704602783a5a..b742013bb9da2f6fbc70b86d2fea909e707e6881 100644 (file)
@@ -833,7 +833,7 @@ static struct cpu_spec cpu_specs[] = {
                .pvr_mask               = 0x7fff0000,
                .pvr_value              = 0x00840000,
                .cpu_name               = "e300c2",
-               .cpu_features           = CPU_FTRS_E300,
+               .cpu_features           = CPU_FTRS_E300C2,
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
@@ -1136,8 +1136,7 @@ static struct cpu_spec cpu_specs[] = {
                .pvr_mask               = 0xff000fff,
                .pvr_value              = 0x53000890,
                .cpu_name               = "440SPe Rev. A",
-               .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
-                       CPU_FTR_USE_TB,
+               .cpu_features           = CPU_FTRS_44X,
                .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index d88e182e40b3849ba085ec3f83265adac690e538..9417cf5b4b7e7af4b361b0075a3b3c4d96dd2416 100644 (file)
@@ -437,6 +437,13 @@ Alignment:
 /* Floating-point unavailable */
        . = 0x800
 FPUnavailable:
+BEGIN_FTR_SECTION
+/*
+ * Certain Freescale cores don't have a FPU and treat fp instructions
+ * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
+ */
+       b       ProgramCheck
+END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
        EXCEPTION_PROLOG
        bne     load_up_fpu             /* if from user, just load it up */
        addi    r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index e2c3c6a85f33b4180cd8f9d812e831e2f4c4176c..8339fd609de045c6c92777594009f9492391163d 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/cache.h>
+#include <linux/bug.h>
 
 #include "setup.h"
 
@@ -290,23 +291,11 @@ int module_finalize(const Elf_Ehdr *hdr,
                    struct module *me)
 {
        const Elf_Shdr *sect;
+       int err;
 
-       me->arch.bug_table = NULL;
-       me->arch.num_bugs = 0;
-
-       /* Find the __bug_table section, if present */
-       sect = find_section(hdr, sechdrs, "__bug_table");
-       if (sect != NULL) {
-               me->arch.bug_table = (void *) sect->sh_addr;
-               me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
-       }
-
-       /*
-        * Strictly speaking this should have a spinlock to protect against
-        * traversals, but since we only traverse on BUG()s, a spinlock
-        * could potentially lead to deadlock and thus be counter-productive.
-        */
-       list_add(&me->arch.bug_list, &module_bug_list);
+       err = module_bug_finalize(hdr, sechdrs, me);
+       if (err)                /* never true, currently */
+               return err;
 
        /* Apply feature fixups */
        sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -320,7 +309,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 void module_arch_cleanup(struct module *mod)
 {
-       list_del(&mod->arch.bug_list);
+       module_bug_cleanup(mod);
 }
 
 struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 8dd1f0aae5d6265809748dcc0e0001b2a55e440e..75c7c4f1928059689353927c2c67d198c62e715f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/moduleloader.h>
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/bug.h>
 #include <asm/module.h>
 #include <asm/uaccess.h>
 #include <asm/firmware.h>
@@ -439,23 +440,11 @@ int module_finalize(const Elf_Ehdr *hdr,
                const Elf_Shdr *sechdrs, struct module *me)
 {
        const Elf_Shdr *sect;
+       int err;
 
-       me->arch.bug_table = NULL;
-       me->arch.num_bugs = 0;
-
-       /* Find the __bug_table section, if present */
-       sect = find_section(hdr, sechdrs, "__bug_table");
-       if (sect != NULL) {
-               me->arch.bug_table = (void *) sect->sh_addr;
-               me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
-       }
-
-       /*
-        * Strictly speaking this should have a spinlock to protect against
-        * traversals, but since we only traverse on BUG()s, a spinlock
-        * could potentially lead to deadlock and thus be counter-productive.
-        */
-       list_add(&me->arch.bug_list, &module_bug_list);
+       err = module_bug_finalize(hdr, sechdrs, me);
+       if (err)
+               return err;
 
        /* Apply feature fixups */
        sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -475,7 +464,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 
 void module_arch_cleanup(struct module *mod)
 {
-       list_del(&mod->arch.bug_list);
+       module_bug_cleanup(mod);
 }
 
 struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 8a06724e029e4fd1f8cee862b95bd7101839ed20..e921514e655bd45a2b96438151e2a2f07d66b10e 100644 (file)
@@ -109,9 +109,7 @@ int of_device_register(struct of_device *ofdev)
        if (rc)
                return rc;
 
-       device_create_file(&ofdev->dev, &dev_attr_devspec);
-
-       return 0;
+       return device_create_file(&ofdev->dev, &dev_attr_devspec);
 }
 
 void of_device_unregister(struct of_device *ofdev)
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index b3189d0161b8d7b4f9a60ac0db8c1804a55584a9..3002ea3a61a24df59fd437ecbd80866af1e3c6d6 100644 (file)
@@ -169,7 +169,7 @@ static void of_platform_make_bus_id(struct of_device *dev)
        char *name = dev->dev.bus_id;
        const u32 *reg;
        u64 addr;
-       long magic;
+       int magic;
 
        /*
         * If it's a DCR based device, use 'd' for native DCRs
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 2f54cd81dea571ce82468d82361ae0a67e397f3e..8336deafc624fd7973c9a36e412787f890e8aeb1 100644 (file)
@@ -736,25 +736,51 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
        return NULL;
 }
 
-static int
-scan_OF_pci_childs_iterator(struct device_node* node, void* data)
+static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
+                                              unsigned int devfn)
 {
-       const unsigned int *reg;
-       u8* fdata = (u8*)data;
-       
-       reg = get_property(node, "reg", NULL);
-       if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
-               && ((reg[0] >> 16) & 0xff) == fdata[0])
-               return 1;
-       return 0;
+       struct device_node *np = NULL;
+       const u32 *reg;
+       unsigned int psize;
+
+       while ((np = of_get_next_child(parent, np)) != NULL) {
+               reg = get_property(np, "reg", &psize);
+               if (reg == NULL || psize < 4)
+                       continue;
+               if (((reg[0] >> 8) & 0xff) == devfn)
+                       return np;
+       }
+       return NULL;
 }
 
-static struct device_node*
-scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
+
+static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
 {
-       u8 filter_data[2] = {bus, dev_fn};
+       struct device_node *parent, *np;
+
+       /* Are we a root bus ? */
+       if (bus->self == NULL || bus->parent == NULL) {
+               struct pci_controller *hose = pci_bus_to_hose(bus->number);
+               if (hose == NULL)
+                       return NULL;
+               return of_node_get(hose->arch_data);
+       }
+
+       /* not a root bus, we need to get our parent */
+       parent = scan_OF_for_pci_bus(bus->parent);
+       if (parent == NULL)
+               return NULL;
+
+       /* now iterate for children for a match */
+       np = scan_OF_for_pci_dev(parent, bus->self->devfn);
+       of_node_put(parent);
 
-       return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
+       /* sanity check */
+       if (strcmp(np->type, "pci") != 0)
+               printk(KERN_WARNING "pci: wrong type \"%s\" for bridge %s\n",
+                      np->type, np->full_name);
+
+       return np;
 }
 
 /*
@@ -763,43 +789,25 @@ scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
 struct device_node *
 pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
 {
-       struct pci_controller *hose;
-       struct device_node *node;
-       int busnr;
+       struct device_node *parent, *np;
 
        if (!have_of)
                return NULL;
-       
-       /* Lookup the hose */
-       busnr = bus->number;
-       hose = pci_bus_to_hose(busnr);
-       if (!hose)
-               return NULL;
 
-       /* Check it has an OF node associated */
-       node = (struct device_node *) hose->arch_data;
-       if (!node)
+       DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
+       parent = scan_OF_for_pci_bus(bus);
+       if (parent == NULL)
                return NULL;
-
-       /* Fixup bus number according to what OF think it is. */
-#ifdef CONFIG_PPC_PMAC
-       /* The G5 need a special case here. Basically, we don't remap all
-        * busses on it so we don't create the pci-OF-map. However, we do
-        * remap the AGP bus and so have to deal with it. A future better
-        * fix has to be done by making the remapping per-host and always
-        * filling the pci_to_OF map. --BenH
+       DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
+       np = scan_OF_for_pci_dev(parent, devfn);
+       of_node_put(parent);
+       DBG(" result is %s\n", np ? np->full_name : "<NULL>");
+
+       /* XXX most callers don't release the returned node
+        * mostly because ppc64 doesn't increase the refcount,
+        * we need to fix that.
         */
-       if (machine_is(powermac) && busnr >= 0xf0)
-               busnr -= 0xf0;
-       else
-#endif
-       if (pci_to_OF_bus_map)
-               busnr = pci_to_OF_bus_map[busnr];
-       if (busnr == 0xff)
-               return NULL;
-       
-       /* Now, lookup childs of the hose */
-       return scan_OF_childs_for_device(node->child, busnr, devfn);
+       return np;
 }
 EXPORT_SYMBOL(pci_busdev_to_OF_node);
 
@@ -1544,7 +1552,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
 
 
 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
-                                              unsigned long *offset,
+                                              resource_size_t *offset,
                                               enum pci_mmap_state mmap_state)
 {
        struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
@@ -1556,7 +1564,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
 
        /* If memory, add on the PCI bridge address offset */
        if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
                *offset += hose->pci_mem_offset;
+#endif
                res_bit = IORESOURCE_MEM;
        } else {
                io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
@@ -1624,9 +1634,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
        else
                prot |= _PAGE_GUARDED;
 
-       printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
-               (unsigned long long)rp->start, prot);
-
        return __pgprot(prot);
 }
 
@@ -1695,7 +1702,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state,
                        int write_combine)
 {
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
        struct resource *rp;
        int ret;
 
@@ -1808,22 +1815,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          resource_size_t *start, resource_size_t *end)
 {
        struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
-       unsigned long offset = 0;
+       resource_size_t offset = 0;
 
        if (hose == NULL)
                return;
 
        if (rsrc->flags & IORESOURCE_IO)
-               offset = (void __iomem *)_IO_BASE - hose->io_base_virt
-                       + hose->io_base_phys;
+               offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+
+       /* We pass a fully fixed up address to userland for MMIO instead of
+        * a BAR value because X is lame and expects to be able to use that
+        * to pass to /dev/mem !
+        *
+        * That means that we'll have potentially 64 bits values where some
+        * userland apps only expect 32 (like X itself since it thinks only
+        * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+        * 32 bits CHRPs :-(
+        *
+        * Hopefully, the sysfs interface is immune to that gunk. Once X
+        * has been fixed (and the fix spread enough), we can re-enable the
+        * 2 lines below and pass down a BAR value to userland. In that case
+        * we'll also have to re-enable the matching code in
+        * __pci_mmap_make_offset().
+        *
+        * BenH.
+        */
+#if 0
+       else if (rsrc->flags & IORESOURCE_MEM)
+               offset = hose->pci_mem_offset;
+#endif
 
-       *start = rsrc->start + offset;
-       *end = rsrc->end + offset;
+       *start = rsrc->start - offset;
+       *end = rsrc->end - offset;
 }
 
-void __init
-pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
-                 int flags, char *name)
+void __init pci_init_resource(struct resource *res, resource_size_t start,
+                             resource_size_t end, int flags, char *name)
 {
        res->start = start;
        res->end = end;
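The BenH comment above is about what userland ultimately sees for MMIO BARs. Independent of that choice, the sysfs route the comment hopes is immune to the problem is mapping the BAR through the per-device resourceN file instead of /dev/mem. A hedged userspace sketch of that consumer (the path and the assumption that the BAR is mmapable on a given platform are illustrative):

	/* userspace sketch: mmap BAR0 via sysfs; error handling trimmed */
	#include <stddef.h>
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>

	void *map_bar0(const char *sysfs_dir, size_t len)
	{
		char path[256];
		int fd;
		void *p;

		snprintf(path, sizeof(path), "%s/resource0", sysfs_dir);
		fd = open(path, O_RDWR | O_SYNC);
		if (fd < 0)
			return NULL;
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		close(fd);
		return p == MAP_FAILED ? NULL : p;
	}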
index 6fa9a0a5c8dbb7f0a700c0e88048e7037caaa63f..a6b7692c72690bc592e1429d08a921e23f9ed946 100644 (file)
@@ -682,7 +682,7 @@ int pci_proc_domain(struct pci_bus *bus)
  * Returns negative error code on failure, zero on success.
  */
 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
-                                              unsigned long *offset,
+                                              resource_size_t *offset,
                                               enum pci_mmap_state mmap_state)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -694,7 +694,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
 
        /* If memory, add on the PCI bridge address offset */
        if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
                *offset += hose->pci_mem_offset;
+#endif
                res_bit = IORESOURCE_MEM;
        } else {
                io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
@@ -762,9 +764,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
        else
                prot |= _PAGE_GUARDED;
 
-       printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
-              prot);
-
        return __pgprot(prot);
 }
 
@@ -832,7 +831,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
 {
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
        struct resource *rp;
        int ret;
 
@@ -1333,20 +1332,41 @@ EXPORT_SYMBOL(pci_read_irq_line);
 
 void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          const struct resource *rsrc,
-                         u64 *start, u64 *end)
+                         resource_size_t *start, resource_size_t *end)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       unsigned long offset = 0;
+       resource_size_t offset = 0;
 
        if (hose == NULL)
                return;
 
        if (rsrc->flags & IORESOURCE_IO)
-               offset = pci_io_base - (unsigned long)hose->io_base_virt +
-                       hose->io_base_phys;
+               offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+       /* We pass a fully fixed up address to userland for MMIO instead of
+        * a BAR value because X is lame and expects to be able to use that
+        * to pass to /dev/mem !
+        *
+        * That means that we'll have potentially 64 bits values where some
+        * userland apps only expect 32 (like X itself since it thinks only
+        * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+        * 32 bits CHRPs :-(
+        *
+        * Hopefully, the sysfs interface is immune to that gunk. Once X
+        * has been fixed (and the fix spread enough), we can re-enable the
+        * 2 lines below and pass down a BAR value to userland. In that case
+        * we'll also have to re-enable the matching code in
+        * __pci_mmap_make_offset().
+        *
+        * BenH.
+        */
+#if 0
+       else if (rsrc->flags & IORESOURCE_MEM)
+               offset = hose->pci_mem_offset;
+#endif
 
-       *start = rsrc->start + offset;
-       *end = rsrc->end + offset;
+       *start = rsrc->start - offset;
+       *end = rsrc->end - offset;
 }
 
 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
index 9179f0739ea2c3afb952fad59fa21017bd1fc403..95776b6af4e2892513a70d118a73e70b39d278f9 100644 (file)
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
 extern long *intercept_table;
 EXPORT_SYMBOL(intercept_table);
 #endif /* CONFIG_PPC_STD_MMU_32 */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_DCR_NATIVE
 EXPORT_SYMBOL(__mtdcr);
 EXPORT_SYMBOL(__mfdcr);
 #endif
index c18dbe77fdc29863eb5a4f3e57c2f5f62e0f93dc..1fc732a552db87ee7a7d0e9aedc4535fc51a8158 100644 (file)
@@ -804,6 +804,56 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
        return of_read_ulong(p, s);
 }
 
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Interpret the ibm,dynamic-memory property in the
+ * /ibm,dynamic-reconfiguration-memory node.
+ * This contains a list of memory blocks along with NUMA affinity
+ * information.
+ */
+static int __init early_init_dt_scan_drconf_memory(unsigned long node)
+{
+       cell_t *dm, *ls;
+       unsigned long l, n;
+       unsigned long base, size, lmb_size, flags;
+
+       ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+       if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
+               return 0;
+       lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+
+       dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
+       if (dm == NULL || l < sizeof(cell_t))
+               return 0;
+
+       n = *dm++;      /* number of entries */
+       if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
+               return 0;
+
+       for (; n != 0; --n) {
+               base = dt_mem_next_cell(dt_root_addr_cells, &dm);
+               flags = dm[3];
+               /* skip DRC index, pad, assoc. list index, flags */
+               dm += 4;
+               /* skip this block if the reserved bit is set in flags (0x80)
+                  or if the block is not assigned to this partition (0x8) */
+               if ((flags & 0x80) || !(flags & 0x8))
+                       continue;
+               size = lmb_size;
+               if (iommu_is_off) {
+                       if (base >= 0x80000000ul)
+                               continue;
+                       if ((base + size) > 0x80000000ul)
+                               size = 0x80000000ul - base;
+               }
+               lmb_add(base, size);
+       }
+       lmb_dump_all();
+       return 0;
+}
+#else
+#define early_init_dt_scan_drconf_memory(node) 0
+#endif /* CONFIG_PPC_PSERIES */
 
 static int __init early_init_dt_scan_memory(unsigned long node,
                                            const char *uname, int depth, void *data)
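For orientation, early_init_dt_scan_drconf_memory() walks ibm,dynamic-memory as a cell count followed by fixed-size records: an address of dt_root_addr_cells cells, then four more cells. A hedged sketch of that per-record layout for the common dt_root_addr_cells == 2 case (field names come from the comments in the code; the real parser reads raw big-endian cells, this struct only visualises the layout):

	/* illustrative layout only, assuming <linux/types.h> */
	struct drconf_mem_entry {
		u64 base_addr;		/* start address of the LMB */
		u32 drc_index;		/* dynamic-reconfiguration connector index */
		u32 reserved;		/* padding */
		u32 assoc_index;	/* row in ibm,associativity-lookup-arrays */
		u32 flags;		/* 0x80 reserved, 0x40 assoc. invalid, 0x08 assigned */
	};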
@@ -812,6 +862,11 @@ static int __init early_init_dt_scan_memory(unsigned long node,
        cell_t *reg, *endp;
        unsigned long l;
 
+       /* Look for the ibm,dynamic-reconfiguration-memory node */
+       if (depth == 1 &&
+           strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
+               return early_init_dt_scan_drconf_memory(node);
+
        /* We are scanning "memory" nodes only */
        if (type == NULL) {
                /*
index 46cf32670ddb67cab23dfe986e686d7bea0ffbb7..520ef42f642ecca95c2f04ce3a4480fa3f913c33 100644 (file)
@@ -679,7 +679,7 @@ static unsigned char ibm_architecture_vec[] = {
        /* option vector 5: PAPR/OF options */
        3 - 2,                          /* length */
        0,                              /* don't ignore, don't halt */
-       OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
+       OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY,
 };
 
 /* Old method - ELF header with PT_NOTE sections */
index 387ed0d9ad618b3597a7f0931af169993ba103ee..76b5d7ebdcc681fe0b8f4730300b90bc55827fce 100644 (file)
@@ -303,6 +303,12 @@ int rtas_token(const char *service)
 }
 EXPORT_SYMBOL(rtas_token);
 
+int rtas_service_present(const char *service)
+{
+       return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
+}
+EXPORT_SYMBOL(rtas_service_present);
+
 #ifdef CONFIG_RTAS_ERROR_LOGGING
 /*
  * Return the firmware-specified size of the error log buffer
@@ -810,32 +816,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
        return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-/* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
-       /* The token is initialized for real in setup_system() */
-       .token = RTAS_UNKNOWN_SERVICE,
-       .nargs = 0,
-       .nret = 1,
-       .rets = &rtas_stop_self_args.args[0],
-};
-
-void rtas_stop_self(void)
-{
-       struct rtas_args *rtas_args = &rtas_stop_self_args;
-
-       local_irq_disable();
-
-       BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
-
-       printk("cpu %u (hwid %u) Ready to die...\n",
-              smp_processor_id(), hard_smp_processor_id());
-       enter_rtas(__pa(rtas_args));
-
-       panic("Alas, I survived.\n");
-}
-#endif
-
 /*
  * Call early during boot, before mem init or bootmem, to retrieve the RTAS
  * informations from the device-tree and allocate the RMO buffer for userland
@@ -880,9 +860,6 @@ void __init rtas_initialize(void)
 #endif
        rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
 
-#ifdef CONFIG_HOTPLUG_CPU
-       rtas_stop_self_args.token = rtas_token("stop-self");
-#endif /* CONFIG_HOTPLUG_CPU */
 #ifdef CONFIG_RTAS_ERROR_LOGGING
        rtas_last_error_token = rtas_token("rtas-last-error");
 #endif
index 63ed265b7f0936765a1cc7bc9356404acf679e68..400ab2b946e760964bd164f282ec1f270579ac6a 100644 (file)
@@ -181,6 +181,8 @@ SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
 SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
 SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
 SYSFS_PMCSETUP(purr, SPRN_PURR);
+SYSFS_PMCSETUP(spurr, SPRN_SPURR);
+SYSFS_PMCSETUP(dscr, SPRN_DSCR);
 
 static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
 static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
@@ -194,6 +196,8 @@ static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
 static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
 static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
 static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
+static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
+static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
 
 static void register_cpu_online(unsigned int cpu)
 {
@@ -231,6 +235,12 @@ static void register_cpu_online(unsigned int cpu)
 
        if (cpu_has_feature(CPU_FTR_PURR))
                sysdev_create_file(s, &attr_purr);
+
+       if (cpu_has_feature(CPU_FTR_SPURR))
+               sysdev_create_file(s, &attr_spurr);
+
+       if (cpu_has_feature(CPU_FTR_DSCR))
+               sysdev_create_file(s, &attr_dscr);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -272,6 +282,12 @@ static void unregister_cpu_online(unsigned int cpu)
 
        if (cpu_has_feature(CPU_FTR_PURR))
                sysdev_remove_file(s, &attr_purr);
+
+       if (cpu_has_feature(CPU_FTR_SPURR))
+               sysdev_remove_file(s, &attr_spurr);
+
+       if (cpu_has_feature(CPU_FTR_DSCR))
+               sysdev_remove_file(s, &attr_dscr);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
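The new spurr and dscr attributes show up per cpu in the sysdev tree, conventionally under /sys/devices/system/cpu/cpuN/. A hedged userspace sketch that reads one of them (the exact path and the hexadecimal output format are assumptions, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long val;
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/spurr", "r");

		if (f == NULL) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%llx", &val) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("SPURR: %llx\n", val);
		return 0;
	}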
 
index 0d4e203fa7a05d90751ff0e7e60cb891f413d6af..535f50665647eb490cdc32fd48223c780d55adfd 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/kprobes.h>
 #include <linux/kexec.h>
 #include <linux/backlight.h>
+#include <linux/bug.h>
 
 #include <asm/kdebug.h>
 #include <asm/pgtable.h>
@@ -727,54 +728,9 @@ static int emulate_instruction(struct pt_regs *regs)
        return -EINVAL;
 }
 
-/*
- * Look through the list of trap instructions that are used for BUG(),
- * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
- * that the exception was caused by a trap instruction of some kind.
- * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
- * otherwise.
- */
-extern struct bug_entry __start___bug_table[], __stop___bug_table[];
-
-#ifndef CONFIG_MODULES
-#define module_find_bug(x)     NULL
-#endif
-
-struct bug_entry *find_bug(unsigned long bugaddr)
+int is_valid_bugaddr(unsigned long addr)
 {
-       struct bug_entry *bug;
-
-       for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
-               if (bugaddr == bug->bug_addr)
-                       return bug;
-       return module_find_bug(bugaddr);
-}
-
-static int check_bug_trap(struct pt_regs *regs)
-{
-       struct bug_entry *bug;
-       unsigned long addr;
-
-       if (regs->msr & MSR_PR)
-               return 0;       /* not in kernel */
-       addr = regs->nip;       /* address of trap instruction */
-       if (addr < PAGE_OFFSET)
-               return 0;
-       bug = find_bug(regs->nip);
-       if (bug == NULL)
-               return 0;
-       if (bug->line & BUG_WARNING_TRAP) {
-               /* this is a WARN_ON rather than BUG/BUG_ON */
-               printk(KERN_ERR "Badness in %s at %s:%ld\n",
-                      bug->function, bug->file,
-                      bug->line & ~BUG_WARNING_TRAP);
-               dump_stack();
-               return 1;
-       }
-       printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
-              bug->function, bug->file, bug->line);
-
-       return 0;
+       return is_kernel_addr(addr);
 }
 
 void __kprobes program_check_exception(struct pt_regs *regs)
@@ -782,6 +738,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
        unsigned int reason = get_reason(regs);
        extern int do_mathemu(struct pt_regs *regs);
 
+       /* We can now get here via an FP Unavailable exception if the core
+        * has no FPU; in that case no reason flags will be set */
 #ifdef CONFIG_MATH_EMULATION
        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
         * but there seems to be a hardware bug on the 405GP (RevD)
@@ -808,7 +766,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                        return;
                if (debugger_bpt(regs))
                        return;
-               if (check_bug_trap(regs)) {
+
+               if (!(regs->msr & MSR_PR) &&  /* not user-mode */
+                   report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        return;
                }
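For context, the check_bug_trap() logic removed above now lives in the generic lib/bug.c: the architecture supplies is_valid_bugaddr(), and report_bug() looks the trap address up in the unified __bug_table and tells the caller whether it hit a WARN (continue past the trap) or a real BUG. A hedged paraphrase of the contract program_check_exception() relies on (spelling approximates the generic <linux/bug.h> of this era):

	/* returned by report_bug() */
	enum bug_trap_type {
		BUG_TRAP_TYPE_NONE,	/* address is not a known BUG/WARN trap */
		BUG_TRAP_TYPE_WARN,	/* WARN_ON: print a warning, resume execution */
		BUG_TRAP_TYPE_BUG,	/* BUG/BUG_ON: report and die */
	};

	enum bug_trap_type report_bug(unsigned long bug_addr);	/* lib/bug.c */
	int is_valid_bugaddr(unsigned long addr);		/* provided per arch */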
index 04b98671a0608d6ced7470712291bde24d3e01ac..04b8e71bf5b0087a814fba70bd47dce22eda717f 100644 (file)
@@ -62,11 +62,7 @@ SECTIONS
                __stop___ex_table = .;
        }
 
-       __bug_table : {
-               __start___bug_table = .;
-               *(__bug_table)
-               __stop___bug_table = .;
-       }
+       BUG_TABLE
 
 /*
  * Init sections discarded at runtime
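BUG_TABLE is the generic linker-script helper from include/asm-generic/vmlinux.lds.h that replaces the hand-rolled section above. A hedged sketch of roughly what it expands to (alignment and AT() details may differ slightly in the real header):

	#define BUG_TABLE						\
		. = ALIGN(8);						\
		__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
			__start___bug_table = .;			\
			*(__bug_table)					\
			__stop___bug_table = .;				\
		}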
index 9da01dc8cfd9d3fc0722de803d7cfbfb9e0ab573..262790910ff23900f5bef2ab057feb4ff5a4e636 100644 (file)
@@ -295,6 +295,63 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
        return lmb_end_of_DRAM() - start;
 }
 
+/*
+ * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
+ * node.  This assumes n_mem_{addr,size}_cells have been set.
+ */
+static void __init parse_drconf_memory(struct device_node *memory)
+{
+       const unsigned int *lm, *dm, *aa;
+       unsigned int ls, ld, la;
+       unsigned int n, aam, aalen;
+       unsigned long lmb_size, size;
+       int nid, default_nid = 0;
+       unsigned int start, ai, flags;
+
+       lm = get_property(memory, "ibm,lmb-size", &ls);
+       dm = get_property(memory, "ibm,dynamic-memory", &ld);
+       aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
+       if (!lm || !dm || !aa ||
+           ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
+           la < 2 * sizeof(unsigned int))
+               return;
+
+       lmb_size = read_n_cells(n_mem_size_cells, &lm);
+       n = *dm++;              /* number of LMBs */
+       aam = *aa++;            /* number of associativity lists */
+       aalen = *aa++;          /* length of each associativity list */
+       if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
+           la < (aam * aalen + 2) * sizeof(unsigned int))
+               return;
+
+       for (; n != 0; --n) {
+               start = read_n_cells(n_mem_addr_cells, &dm);
+               ai = dm[2];
+               flags = dm[3];
+               dm += 4;
+               /* 0x80 == reserved, 0x8 = assigned to us */
+               if ((flags & 0x80) || !(flags & 0x8))
+                       continue;
+               nid = default_nid;
+               /* flags & 0x40 means associativity index is invalid */
+               if (min_common_depth > 0 && min_common_depth <= aalen &&
+                   (flags & 0x40) == 0 && ai < aam) {
+                       /* this is like of_node_to_nid_single */
+                       nid = aa[ai * aalen + min_common_depth - 1];
+                       if (nid == 0xffff || nid >= MAX_NUMNODES)
+                               nid = default_nid;
+               }
+               node_set_online(nid);
+
+               size = numa_enforce_memory_limit(start, lmb_size);
+               if (!size)
+                       continue;
+
+               add_active_range(nid, start >> PAGE_SHIFT,
+                                (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+       }
+}
+
 static int __init parse_numa_properties(void)
 {
        struct device_node *cpu = NULL;
@@ -385,6 +442,14 @@ new_range:
                        goto new_range;
        }
 
+       /*
+        * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+        * property in the ibm,dynamic-reconfiguration-memory node.
+        */
+       memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+       if (memory)
+               parse_drconf_memory(memory);
+
        return 0;
 }
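A short worked example of the associativity lookup above, with invented numbers: ibm,associativity-lookup-arrays begins with two header cells (aam rows, aalen cells per row), and an LMB entry carrying ai = 2, with aalen = 5 and min_common_depth = 4, resolves to

	index = ai * aalen + (min_common_depth - 1) = 2 * 5 + 3 = 13,  nid = aa[13]

That cell is used as the node id unless it is 0xffff or >= MAX_NUMNODES, in which case the default node is used instead.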
 
index a375c15b4315b502517a9d65510fc5eeb509bc84..eaff71e74fb0f8a783465715f73560c877a81c69 100644 (file)
@@ -40,8 +40,6 @@
 #include <asm/prom.h>
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
 #include <asm/of_platform.h>
 
 #include <asm/mpc52xx.h>
index 616a0a3fd0e2ad9c10330590b3d5c0a785e2317d..70e0d968d30f9e5edd77efdc7ce01451586440e3 100644 (file)
@@ -115,6 +115,7 @@ static struct sysdev_attribute attr_spu_temperature = {
 
 static struct attribute *spu_attributes[] = {
        &attr_spu_temperature.attr,
+       NULL,
 };
 
 static struct attribute_group spu_attribute_group = {
@@ -135,6 +136,7 @@ static struct sysdev_attribute attr_ppe_temperature1 = {
 static struct attribute *ppe_attributes[] = {
        &attr_ppe_temperature0.attr,
        &attr_ppe_temperature1.attr,
+       NULL,
 };
 
 static struct attribute_group ppe_attribute_group = {
index 99c612025e8fa824dc35cb34741fc6672f6a2e39..d04ae1671e6ce5d40f413f820efeb645169d3ba2 100644 (file)
@@ -382,11 +382,14 @@ static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-int __init cbe_init_pm_irq(void)
+static int __init cbe_init_pm_irq(void)
 {
        unsigned int irq;
        int rc, node;
 
+       if (!machine_is(cell))
+               return 0;
+
        for_each_node(node) {
                irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
                                               (node << IIC_IRQ_NODE_SHIFT));
index 26945c491f6b21bd7783bff75478df20a9c9aafe..725e19561159dd60bf492d0d87816f3f76767e51 100644 (file)
@@ -147,7 +147,7 @@ static int spufs_arch_notes_size(void)
        struct fdtable *fdt = files_fdtable(current->files);
        int size = 0, fd;
 
-       for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) {
+       for (fd = 0; fd < fdt->max_fds; fd++) {
                if (FD_ISSET(fd, fdt->open_fds)) {
                        struct file *file = fcheck(fd);
 
index 3a32deda765dab3487e1174467f923ceede19fc6..3f6a69f67195a0e7172a072925d6461eedc388fa 100644 (file)
@@ -562,7 +562,7 @@ void __init maple_pci_init(void)
        for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
                if (np->name == NULL)
                        continue;
-               if (strcmp(np->name, "pci") == 0) {
+               if (!strcmp(np->name, "pci") || !strcmp(np->name, "pcie")) {
                        if (add_bridge(np) == 0)
                                of_node_get(np);
                }
index 094989d50babaf0bdfe46db5e4b3ab1cd1531924..f12d5c69e74dea275d772308cdcf311da4f54956 100644 (file)
@@ -60,6 +60,7 @@
 #include <asm/of_device.h>
 #include <asm/lmb.h>
 #include <asm/mpic.h>
+#include <asm/rtas.h>
 #include <asm/udbg.h>
 
 #include "maple.h"
@@ -166,6 +167,16 @@ struct smp_ops_t maple_smp_ops = {
 };
 #endif /* CONFIG_SMP */
 
+static void __init maple_use_rtas_reboot_and_halt_if_present(void)
+{
+       if (rtas_service_present("system-reboot") &&
+           rtas_service_present("power-off")) {
+               ppc_md.restart = rtas_restart;
+               ppc_md.power_off = rtas_power_off;
+               ppc_md.halt = rtas_halt;
+       }
+}
+
 void __init maple_setup_arch(void)
 {
        /* init to some ~sane value until calibrate_delay() runs */
@@ -181,6 +192,7 @@ void __init maple_setup_arch(void)
 #ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
 #endif
+       maple_use_rtas_reboot_and_halt_if_present();
 
        printk(KERN_DEBUG "Using native/NAP idle loop\n");
 }
index 451bfcd5502e537549937f989bc5b99795d1866c..de52ec4e9e586c7d11e088587beddfb669ce4a6e 100644 (file)
@@ -40,4 +40,15 @@ config PS3_USE_LPAR_ADDR
 
          If you have any doubt, choose the default y.
 
+config PS3_VUART
+       depends on PPC_PS3
+       bool "PS3 Virtual UART support"
+       default y
+       help
+         Include support for the PS3 Virtual UART.
+
+         This support is required for several system services
+         including the System Manager and AV Settings.  In
+         general, all users will say Y.
+
 endmenu
index 997243a91be8778b77b95619e20f385527c7c9ef..69590fbf83daef9ba3a246c7e9f9a14b0d8261b5 100644 (file)
@@ -10,6 +10,8 @@ obj-$(CONFIG_XICS)    += xics.o
 obj-$(CONFIG_SCANLOG)  += scanlog.o
 obj-$(CONFIG_EEH)      += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
 
+obj-$(CONFIG_HOTPLUG_CPU)      += hotplug-cpu.o
+
 obj-$(CONFIG_HVC_CONSOLE)      += hvconsole.o
 obj-$(CONFIG_HVCS)             += hvcserver.o
 obj-$(CONFIG_HCALL_STATS)      += hvCall_inst.o
index 3c2d63ebf787fae9e60e3c98713c36d3e1590d8f..da6e5362e7cd8f13f802729e957538cc491e41b3 100644 (file)
@@ -337,6 +337,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
                        printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n",
                                pdn->eeh_check_count);
                        dump_stack();
+                       msleep(5000);
                        
                        /* re-read the slot reset state */
                        if (read_slot_reset_state(pdn, rets) != 0)
index c2bc9904f1cb9af0369b11ceb76bae5fcf12f619..cbd6b0711ab450f1c306930d1ebdb50e3834201d 100644 (file)
@@ -170,14 +170,19 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
 static void eeh_report_resume(struct pci_dev *dev, void *userdata)
 {
        struct pci_driver *driver = dev->driver;
+       struct device_node *dn = pci_device_to_OF_node(dev);
 
        dev->error_state = pci_channel_io_normal;
 
        if (!driver)
                return;
-       if (!driver->err_handler)
-               return;
-       if (!driver->err_handler->resume)
+
+       if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
+               PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
+               enable_irq(dev->irq);
+       }
+       if (!driver->err_handler ||
+           !driver->err_handler->resume)
                return;
 
        driver->err_handler->resume(dev);
@@ -407,6 +412,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
 
                if (rc)
                        result = PCI_ERS_RESULT_NEED_RESET;
+               else
+                       result = PCI_ERS_RESULT_RECOVERED;
        }
 
        /* If any device has a hard failure, then shut off everything. */
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
new file mode 100644 (file)
index 0000000..f460b9c
--- /dev/null
@@ -0,0 +1,275 @@
+/*
+ * pseries CPU Hotplug infrastructure.
+ *
+ * Split out from arch/powerpc/platforms/pseries/setup.c
+ *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
+ *
+ * Peter Bergner, IBM  March 2001.
+ * Copyright (C) 2001 IBM.
+ * Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
+ * Plus various changes from other IBM teams...
+ *
+ * Copyright (C) 2006 Michael Ellerman, IBM Corporation
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <asm/system.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/firmware.h>
+#include <asm/machdep.h>
+#include <asm/vdso_datapage.h>
+#include <asm/pSeries_reconfig.h>
+#include "xics.h"
+
+/* This version can't take the spinlock, because it never returns */
+static struct rtas_args rtas_stop_self_args = {
+       .token = RTAS_UNKNOWN_SERVICE,
+       .nargs = 0,
+       .nret = 1,
+       .rets = &rtas_stop_self_args.args[0],
+};
+
+static void rtas_stop_self(void)
+{
+       struct rtas_args *args = &rtas_stop_self_args;
+
+       local_irq_disable();
+
+       BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+
+       printk("cpu %u (hwid %u) Ready to die...\n",
+              smp_processor_id(), hard_smp_processor_id());
+       enter_rtas(__pa(args));
+
+       panic("Alas, I survived.\n");
+}
+
+static void pseries_mach_cpu_die(void)
+{
+       local_irq_disable();
+       idle_task_exit();
+       xics_teardown_cpu(0);
+       rtas_stop_self();
+       /* Should never get here... */
+       BUG();
+       for(;;);
+}
+
+static int qcss_tok;   /* query-cpu-stopped-state token */
+
+/* Get state of physical CPU.
+ * Return codes:
+ *     0       - The processor is in the RTAS stopped state
+ *     1       - stop-self is in progress
+ *     2       - The processor is not in the RTAS stopped state
+ *     -1      - Hardware Error
+ *     -2      - Hardware Busy, Try again later.
+ */
+static int query_cpu_stopped(unsigned int pcpu)
+{
+       int cpu_status, status;
+
+       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+       if (status != 0) {
+               printk(KERN_ERR
+                      "RTAS query-cpu-stopped-state failed: %i\n", status);
+               return status;
+       }
+
+       return cpu_status;
+}
+
+static int pseries_cpu_disable(void)
+{
+       int cpu = smp_processor_id();
+
+       cpu_clear(cpu, cpu_online_map);
+       vdso_data->processorCount--;
+
+       /*fix boot_cpuid here*/
+       if (cpu == boot_cpuid)
+               boot_cpuid = any_online_cpu(cpu_online_map);
+
+       /* FIXME: abstract this to not be platform specific later on */
+       xics_migrate_irqs_away();
+       return 0;
+}
+
+static void pseries_cpu_die(unsigned int cpu)
+{
+       int tries;
+       int cpu_status;
+       unsigned int pcpu = get_hard_smp_processor_id(cpu);
+
+       for (tries = 0; tries < 25; tries++) {
+               cpu_status = query_cpu_stopped(pcpu);
+               if (cpu_status == 0 || cpu_status == -1)
+                       break;
+               msleep(200);
+       }
+       if (cpu_status != 0) {
+               printk("Querying DEAD? cpu %i (%i) shows %i\n",
+                      cpu, pcpu, cpu_status);
+       }
+
+       /* Isolation and deallocation are definitely done by
+        * drslot_chrp_cpu.  If they were not they would be
+        * done here.  Change isolate state to Isolate and
+        * change allocation-state to Unusable.
+        */
+       paca[cpu].cpu_start = 0;
+}
+
+/*
+ * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
+ * here is that a cpu device node may represent up to two logical cpus
+ * in the SMT case.  We must honor the assumption in other code that
+ * the logical ids for sibling SMT threads x and y are adjacent, such
+ * that x^1 == y and y^1 == x.
+ */
+static int pseries_add_processor(struct device_node *np)
+{
+       unsigned int cpu;
+       cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+       int err = -ENOSPC, len, nthreads, i;
+       const u32 *intserv;
+
+       intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+       if (!intserv)
+               return 0;
+
+       nthreads = len / sizeof(u32);
+       for (i = 0; i < nthreads; i++)
+               cpu_set(i, tmp);
+
+       lock_cpu_hotplug();
+
+       BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+
+       /* Get a bitmap of unoccupied slots. */
+       cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
+       if (cpus_empty(candidate_map)) {
+               /* If we get here, it most likely means that NR_CPUS is
+                * less than the partition's max processors setting.
+                */
+               printk(KERN_ERR "Cannot add cpu %s; this system configuration"
+                      " supports %d logical cpus.\n", np->full_name,
+                      cpus_weight(cpu_possible_map));
+               goto out_unlock;
+       }
+
+       while (!cpus_empty(tmp))
+               if (cpus_subset(tmp, candidate_map))
+                       /* Found a range where we can insert the new cpu(s) */
+                       break;
+               else
+                       cpus_shift_left(tmp, tmp, nthreads);
+
+       if (cpus_empty(tmp)) {
+               printk(KERN_ERR "Unable to find space in cpu_present_map for"
+                      " processor %s with %d thread(s)\n", np->name,
+                      nthreads);
+               goto out_unlock;
+       }
+
+       for_each_cpu_mask(cpu, tmp) {
+               BUG_ON(cpu_isset(cpu, cpu_present_map));
+               cpu_set(cpu, cpu_present_map);
+               set_hard_smp_processor_id(cpu, *intserv++);
+       }
+       err = 0;
+out_unlock:
+       unlock_cpu_hotplug();
+       return err;
+}
+
+/*
+ * Update the present map for a cpu node which is going away, and set
+ * the hard id in the paca(s) to -1 to be consistent with boot time
+ * convention for non-present cpus.
+ */
+static void pseries_remove_processor(struct device_node *np)
+{
+       unsigned int cpu;
+       int len, nthreads, i;
+       const u32 *intserv;
+
+       intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+       if (!intserv)
+               return;
+
+       nthreads = len / sizeof(u32);
+
+       lock_cpu_hotplug();
+       for (i = 0; i < nthreads; i++) {
+               for_each_present_cpu(cpu) {
+                       if (get_hard_smp_processor_id(cpu) != intserv[i])
+                               continue;
+                       BUG_ON(cpu_online(cpu));
+                       cpu_clear(cpu, cpu_present_map);
+                       set_hard_smp_processor_id(cpu, -1);
+                       break;
+               }
+               if (cpu == NR_CPUS)
+                       printk(KERN_WARNING "Could not find cpu to remove "
+                              "with physical id 0x%x\n", intserv[i]);
+       }
+       unlock_cpu_hotplug();
+}
+
+static int pseries_smp_notifier(struct notifier_block *nb,
+                               unsigned long action, void *node)
+{
+       int err = NOTIFY_OK;
+
+       switch (action) {
+       case PSERIES_RECONFIG_ADD:
+               if (pseries_add_processor(node))
+                       err = NOTIFY_BAD;
+               break;
+       case PSERIES_RECONFIG_REMOVE:
+               pseries_remove_processor(node);
+               break;
+       default:
+               err = NOTIFY_DONE;
+               break;
+       }
+       return err;
+}
+
+static struct notifier_block pseries_smp_nb = {
+       .notifier_call = pseries_smp_notifier,
+};
+
+static int __init pseries_cpu_hotplug_init(void)
+{
+       rtas_stop_self_args.token = rtas_token("stop-self");
+       qcss_tok = rtas_token("query-cpu-stopped-state");
+
+       if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+                       qcss_tok == RTAS_UNKNOWN_SERVICE) {
+               printk(KERN_INFO "CPU Hotplug not supported by firmware "
+                               "- disabling.\n");
+               return 0;
+       }
+
+       ppc_md.cpu_die = pseries_mach_cpu_die;
+       smp_ops->cpu_disable = pseries_cpu_disable;
+       smp_ops->cpu_die = pseries_cpu_die;
+
+       /* Processors can be added/removed only on LPAR */
+       if (firmware_has_feature(FW_FEATURE_LPAR))
+               pSeries_reconfig_notifier_register(&pseries_smp_nb);
+
+       return 0;
+}
+arch_initcall(pseries_cpu_hotplug_init);
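pseries_add_processor() above depends on sibling SMT threads receiving adjacent logical ids, so that partners satisfy x ^ 1 == y. A tiny hedged sketch of that invariant (hypothetical helper, not part of this file):

	/* hypothetical: logical id of the SMT partner thread */
	static inline unsigned int smt_partner(unsigned int cpu)
	{
		return cpu ^ 1;		/* e.g. 4 <-> 5, 6 <-> 7 */
	}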
index 0dc2548ca9bcdd45bebf827db6aa2fe813b20bb0..042ecae107ac89066c5596ab0ccafe58161539d9 100644 (file)
@@ -347,21 +347,6 @@ static int __init pSeries_init_panel(void)
 }
 arch_initcall(pSeries_init_panel);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void pSeries_mach_cpu_die(void)
-{
-       local_irq_disable();
-       idle_task_exit();
-       xics_teardown_cpu(0);
-       rtas_stop_self();
-       /* Should never get here... */
-       BUG();
-       for(;;);
-}
-#else
-#define pSeries_mach_cpu_die NULL
-#endif
-
 static int pseries_set_dabr(unsigned long dabr)
 {
        return plpar_hcall_norets(H_SET_DABR, dabr);
@@ -437,19 +422,14 @@ static int __init pSeries_probe_hypertas(unsigned long node,
        if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
                powerpc_firmware_features |= FW_FEATURE_LPAR;
 
-       if (firmware_has_feature(FW_FEATURE_LPAR))
-               hpte_init_lpar();
-       else
-               hpte_init_native();
-
        return 1;
 }
 
 static int __init pSeries_probe(void)
 {
        unsigned long root = of_get_flat_dt_root();
-       char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
-                                         "device_type", NULL);
+       char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
+
        if (dtype == NULL)
                return 0;
        if (strcmp(dtype, "chrp"))
@@ -467,6 +447,11 @@ static int __init pSeries_probe(void)
        /* Now try to figure out if we are running on LPAR */
        of_scan_flat_dt(pSeries_probe_hypertas, NULL);
 
+       if (firmware_has_feature(FW_FEATURE_LPAR))
+               hpte_init_lpar();
+       else
+               hpte_init_native();
+
        DBG("Machine is%s LPAR !\n",
            (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
 
@@ -561,7 +546,6 @@ define_machine(pseries) {
        .power_off              = rtas_power_off,
        .halt                   = rtas_halt,
        .panic                  = rtas_os_term,
-       .cpu_die                = pSeries_mach_cpu_die,
        .get_boot_time          = rtas_get_boot_time,
        .get_rtc_time           = rtas_get_rtc_time,
        .set_rtc_time           = rtas_set_rtc_time,
index c6624b8a0e774b2aca2e3dab6fc268785f623318..4408518eaebe4afe76d6a1b3f354a4a6cc0a0421 100644 (file)
@@ -64,197 +64,6 @@ static cpumask_t of_spin_map;
 
 extern void generic_secondary_smp_init(unsigned long);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Get state of physical CPU.
- * Return codes:
- *     0       - The processor is in the RTAS stopped state
- *     1       - stop-self is in progress
- *     2       - The processor is not in the RTAS stopped state
- *     -1      - Hardware Error
- *     -2      - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
-       int cpu_status;
-       int status, qcss_tok;
-
-       qcss_tok = rtas_token("query-cpu-stopped-state");
-       if (qcss_tok == RTAS_UNKNOWN_SERVICE)
-               return -1;
-       status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
-       if (status != 0) {
-               printk(KERN_ERR
-                      "RTAS query-cpu-stopped-state failed: %i\n", status);
-               return status;
-       }
-
-       return cpu_status;
-}
-
-static int pSeries_cpu_disable(void)
-{
-       int cpu = smp_processor_id();
-
-       cpu_clear(cpu, cpu_online_map);
-       vdso_data->processorCount--;
-
-       /*fix boot_cpuid here*/
-       if (cpu == boot_cpuid)
-               boot_cpuid = any_online_cpu(cpu_online_map);
-
-       /* FIXME: abstract this to not be platform specific later on */
-       xics_migrate_irqs_away();
-       return 0;
-}
-
-static void pSeries_cpu_die(unsigned int cpu)
-{
-       int tries;
-       int cpu_status;
-       unsigned int pcpu = get_hard_smp_processor_id(cpu);
-
-       for (tries = 0; tries < 25; tries++) {
-               cpu_status = query_cpu_stopped(pcpu);
-               if (cpu_status == 0 || cpu_status == -1)
-                       break;
-               msleep(200);
-       }
-       if (cpu_status != 0) {
-               printk("Querying DEAD? cpu %i (%i) shows %i\n",
-                      cpu, pcpu, cpu_status);
-       }
-
-       /* Isolation and deallocation are definatly done by
-        * drslot_chrp_cpu.  If they were not they would be
-        * done here.  Change isolate state to Isolate and
-        * change allocation-state to Unusable.
-        */
-       paca[cpu].cpu_start = 0;
-}
-
-/*
- * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
- * here is that a cpu device node may represent up to two logical cpus
- * in the SMT case.  We must honor the assumption in other code that
- * the logical ids for sibling SMT threads x and y are adjacent, such
- * that x^1 == y and y^1 == x.
- */
-static int pSeries_add_processor(struct device_node *np)
-{
-       unsigned int cpu;
-       cpumask_t candidate_map, tmp = CPU_MASK_NONE;
-       int err = -ENOSPC, len, nthreads, i;
-       const u32 *intserv;
-
-       intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
-       if (!intserv)
-               return 0;
-
-       nthreads = len / sizeof(u32);
-       for (i = 0; i < nthreads; i++)
-               cpu_set(i, tmp);
-
-       lock_cpu_hotplug();
-
-       BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
-
-       /* Get a bitmap of unoccupied slots. */
-       cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-       if (cpus_empty(candidate_map)) {
-               /* If we get here, it most likely means that NR_CPUS is
-                * less than the partition's max processors setting.
-                */
-               printk(KERN_ERR "Cannot add cpu %s; this system configuration"
-                      " supports %d logical cpus.\n", np->full_name,
-                      cpus_weight(cpu_possible_map));
-               goto out_unlock;
-       }
-
-       while (!cpus_empty(tmp))
-               if (cpus_subset(tmp, candidate_map))
-                       /* Found a range where we can insert the new cpu(s) */
-                       break;
-               else
-                       cpus_shift_left(tmp, tmp, nthreads);
-
-       if (cpus_empty(tmp)) {
-               printk(KERN_ERR "Unable to find space in cpu_present_map for"
-                      " processor %s with %d thread(s)\n", np->name,
-                      nthreads);
-               goto out_unlock;
-       }
-
-       for_each_cpu_mask(cpu, tmp) {
-               BUG_ON(cpu_isset(cpu, cpu_present_map));
-               cpu_set(cpu, cpu_present_map);
-               set_hard_smp_processor_id(cpu, *intserv++);
-       }
-       err = 0;
-out_unlock:
-       unlock_cpu_hotplug();
-       return err;
-}
-
-/*
- * Update the present map for a cpu node which is going away, and set
- * the hard id in the paca(s) to -1 to be consistent with boot time
- * convention for non-present cpus.
- */
-static void pSeries_remove_processor(struct device_node *np)
-{
-       unsigned int cpu;
-       int len, nthreads, i;
-       const u32 *intserv;
-
-       intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
-       if (!intserv)
-               return;
-
-       nthreads = len / sizeof(u32);
-
-       lock_cpu_hotplug();
-       for (i = 0; i < nthreads; i++) {
-               for_each_present_cpu(cpu) {
-                       if (get_hard_smp_processor_id(cpu) != intserv[i])
-                               continue;
-                       BUG_ON(cpu_online(cpu));
-                       cpu_clear(cpu, cpu_present_map);
-                       set_hard_smp_processor_id(cpu, -1);
-                       break;
-               }
-               if (cpu == NR_CPUS)
-                       printk(KERN_WARNING "Could not find cpu to remove "
-                              "with physical id 0x%x\n", intserv[i]);
-       }
-       unlock_cpu_hotplug();
-}
-
-static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
-{
-       int err = NOTIFY_OK;
-
-       switch (action) {
-       case PSERIES_RECONFIG_ADD:
-               if (pSeries_add_processor(node))
-                       err = NOTIFY_BAD;
-               break;
-       case PSERIES_RECONFIG_REMOVE:
-               pSeries_remove_processor(node);
-               break;
-       default:
-               err = NOTIFY_DONE;
-               break;
-       }
-       return err;
-}
-
-static struct notifier_block pSeries_smp_nb = {
-       .notifier_call = pSeries_smp_notifier,
-};
-
-#endif /* CONFIG_HOTPLUG_CPU */
-
 /**
  * smp_startup_cpu() - start the given cpu
  *
@@ -422,15 +231,6 @@ static void __init smp_init_pseries(void)
 
        DBG(" -> smp_init_pSeries()\n");
 
-#ifdef CONFIG_HOTPLUG_CPU
-       smp_ops->cpu_disable = pSeries_cpu_disable;
-       smp_ops->cpu_die = pSeries_cpu_die;
-
-       /* Processors can be added/removed only on LPAR */
-       if (firmware_has_feature(FW_FEATURE_LPAR))
-               pSeries_reconfig_notifier_register(&pSeries_smp_nb);
-#endif
-
        /* Mark threads which are still spinning in hold loops. */
        if (cpu_has_feature(CPU_FTR_SMT)) {
                for_each_present_cpu(i) { 
index 6cc34597a620202e901927359bf9207664ffb0a8..04d4917eb3035c4140d184e1c740b6fc9eb535cd 100644 (file)
@@ -5,7 +5,8 @@ endif
 obj-$(CONFIG_MPIC)             += mpic.o
 obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
 obj-$(CONFIG_PPC_MPC106)       += grackle.o
-obj-$(CONFIG_PPC_DCR)          += dcr.o dcr-low.o
+obj-$(CONFIG_PPC_DCR)          += dcr.o
+obj-$(CONFIG_PPC_DCR_NATIVE)   += dcr-low.o
 obj-$(CONFIG_U3_DART)          += dart_iommu.o
 obj-$(CONFIG_MMIO_NVRAM)       += mmio_nvram.o
 obj-$(CONFIG_FSL_SOC)          += fsl_soc.o
diff --git a/arch/powerpc/sysdev/dcr.S b/arch/powerpc/sysdev/dcr.S
deleted file mode 100644 (file)
index 2078f39..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * "Indirect" DCR access
- *
- * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under  the terms of  the GNU General Public License as published by the
- * Free Software Foundation;  either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/processor.h>
-
-#define DCR_ACCESS_PROLOG(table) \
-       rlwinm  r3,r3,4,18,27;   \
-       lis     r5,table@h;      \
-       ori     r5,r5,table@l;   \
-       add     r3,r3,r5;        \
-       mtctr   r3;              \
-       bctr
-
-_GLOBAL(__mfdcr)
-       DCR_ACCESS_PROLOG(__mfdcr_table)
-
-_GLOBAL(__mtdcr)
-       DCR_ACCESS_PROLOG(__mtdcr_table)
-
-__mfdcr_table:
-       mfdcr  r3,0; blr
-__mtdcr_table:
-       mtdcr  0,r4; blr
-
-dcr     = 1
-        .rept   1023
-       mfdcr   r3,dcr; blr
-       mtdcr   dcr,r4; blr
-       dcr     = dcr + 1
-       .endr
index 6995f51b94882bdcf55815572c305d42516e4771..74e48d94f27ced66c0ed03fad91644a5cdb26222 100644 (file)
@@ -223,23 +223,15 @@ static void qe_ic_mask_irq(unsigned int virq)
        qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
                    temp & ~qe_ic_info[src].mask);
 
-       spin_unlock_irqrestore(&qe_ic_lock, flags);
-}
-
-static void qe_ic_mask_irq_and_ack(unsigned int virq)
-{
-       struct qe_ic *qe_ic = qe_ic_from_irq(virq);
-       unsigned int src = virq_to_hw(virq);
-       unsigned long flags;
-       u32 temp;
-
-       spin_lock_irqsave(&qe_ic_lock, flags);
-
-       temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
-       qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
-                   temp & ~qe_ic_info[src].mask);
-
-       /* There is nothing to do for ack here, ack is handled in ISR */
+       /* Flush the above write before enabling interrupts; otherwise,
+        * spurious interrupts will sometimes happen.  To be 100% sure
+        * that the write has reached the device before interrupts are
+        * enabled, the mask register would have to be read back; however,
+        * this is not required for correctness, only to avoid wasting
+        * time on a large number of spurious interrupts.  In testing,
+        * a sync reduced the observed spurious interrupts to zero.
+        */
+       mb();
 
        spin_unlock_irqrestore(&qe_ic_lock, flags);
 }
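The comment in qe_ic_mask_irq() notes that only a read-back of the mask register would make the flush fully synchronous; the sync is judged sufficient in practice. A hedged sketch of that stricter variant, using the same helpers as the function above (not part of this patch):

	static void qe_ic_mask_irq_sync(unsigned int virq)
	{
		struct qe_ic *qe_ic = qe_ic_from_irq(virq);
		unsigned int src = virq_to_hw(virq);
		unsigned long flags;
		u32 temp;

		spin_lock_irqsave(&qe_ic_lock, flags);

		temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
		qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
			    temp & ~qe_ic_info[src].mask);

		/* read back so the write is guaranteed to have reached the device */
		(void)qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);

		spin_unlock_irqrestore(&qe_ic_lock, flags);
	}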
@@ -248,7 +240,7 @@ static struct irq_chip qe_ic_irq_chip = {
        .typename = " QEIC  ",
        .unmask = qe_ic_unmask_irq,
        .mask = qe_ic_mask_irq,
-       .mask_ack = qe_ic_mask_irq_and_ack,
+       .mask_ack = qe_ic_mask_irq,
 };
 
 static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
@@ -331,34 +323,22 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
        return irq_linear_revmap(qe_ic->irqhost, irq);
 }
 
-/* FIXME: We mask all the QE Low interrupts while handling.  We should
- * let other interrupt come in, but BAD interrupts are generated */
 void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
 {
        struct qe_ic *qe_ic = desc->handler_data;
-       struct irq_chip *chip = irq_desc[irq].chip;
-
        unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
 
-       chip->mask_ack(irq);
        if (cascade_irq != NO_IRQ)
                generic_handle_irq(cascade_irq);
-       chip->unmask(irq);
 }
 
-/* FIXME: We mask all the QE High interrupts while handling.  We should
- * let other interrupt come in, but BAD interrupts are generated */
 void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
 {
        struct qe_ic *qe_ic = desc->handler_data;
-       struct irq_chip *chip = irq_desc[irq].chip;
-
        unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
 
-       chip->mask_ack(irq);
        if (cascade_irq != NO_IRQ)
                generic_handle_irq(cascade_irq);
-       chip->unmask(irq);
 }
 
 void __init qe_ic_init(struct device_node *node, unsigned int flags)
index bf5b3f10e6c600099011cab1ab9f0ccbd221a635..c855a3b298a3f253af247c949db8813622b47ba9 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/kernel.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 static int __init powerpc_flash_init(void)
 {
index a34ed49e0356c418a81fa5cfc85e0f47c0cd84a3..77540a2f77040734396570889e6127230d965404 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/sysrq.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/bug.h>
 
 #include <asm/ptrace.h>
 #include <asm/string.h>
@@ -35,7 +36,6 @@
 #include <asm/cputable.h>
 #include <asm/rtas.h>
 #include <asm/sstep.h>
-#include <asm/bug.h>
 #include <asm/irq_regs.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
@@ -1346,7 +1346,7 @@ static void backtrace(struct pt_regs *excp)
 
 static void print_bug_trap(struct pt_regs *regs)
 {
-       struct bug_entry *bug;
+       const struct bug_entry *bug;
        unsigned long addr;
 
        if (regs->msr & MSR_PR)
@@ -1357,11 +1357,11 @@ static void print_bug_trap(struct pt_regs *regs)
        bug = find_bug(regs->nip);
        if (bug == NULL)
                return;
-       if (bug->line & BUG_WARNING_TRAP)
+       if (is_warning_bug(bug))
                return;
 
-       printf("kernel BUG in %s at %s:%d!\n",
-              bug->function, bug->file, (unsigned int)bug->line);
+       printf("kernel BUG at %s:%u!\n",
+              bug->file, bug->line);
 }
 
 void excprint(struct pt_regs *fp)
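print_bug_trap() above now consumes the generic bug_entry shared with lib/bug.c rather than the old powerpc-private one. A hedged sketch of the fields it assumes (roughly as in <asm-generic/bug.h> of this era; file and line exist only with CONFIG_DEBUG_BUGVERBOSE):

	struct bug_entry {
		unsigned long	bug_addr;	/* address of the trap instruction */
	#ifdef CONFIG_DEBUG_BUGVERBOSE
		const char	*file;
		unsigned short	line;
	#endif
		unsigned short	flags;		/* is_warning_bug() tests a warning bit here */
	};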
index 63808e01cb0b7cc51ea75c20885c630d8ef63ea4..5e723c4c2571b0a3dc886032a92681a9927b9d7c 100644 (file)
@@ -879,7 +879,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
 
 
 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
-                                              unsigned long *offset,
+                                              resource_size_t *offset,
                                               enum pci_mmap_state mmap_state)
 {
        struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
@@ -891,7 +891,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
 
        /* If memory, add on the PCI bridge address offset */
        if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
                *offset += hose->pci_mem_offset;
+#endif
                res_bit = IORESOURCE_MEM;
        } else {
                io_offset = hose->io_base_virt - ___IO_BASE;
@@ -1030,7 +1032,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state,
                        int write_combine)
 {
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
        struct resource *rp;
        int ret;
 
@@ -1132,21 +1134,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          resource_size_t *start, resource_size_t *end)
 {
        struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
-       unsigned long offset = 0;
+       resource_size_t offset = 0;
 
        if (hose == NULL)
                return;
 
        if (rsrc->flags & IORESOURCE_IO)
-               offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
+               offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+
+       /* We pass a fully fixed up address to userland for MMIO instead of
+        * a BAR value because X is lame and expects to be able to use that
+        * to pass to /dev/mem !
+        *
+        * That means that we'll have potentially 64 bits values where some
+        * userland apps only expect 32 (like X itself since it thinks only
+        * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+        * 32 bits CHRPs :-(
+        *
+        * Hopefully, the sysfs interface is immune to that gunk. Once X
+        * has been fixed (and the fix spread enough), we can re-enable the
+        * 2 lines below and pass down a BAR value to userland. In that case
+        * we'll also have to re-enable the matching code in
+        * __pci_mmap_make_offset().
+        *
+        * BenH.
+        */
+#if 0
+       else if (rsrc->flags & IORESOURCE_MEM)
+               offset = hose->pci_mem_offset;
+#endif
 
-       *start = rsrc->start + offset;
-       *end = rsrc->end + offset;
+       *start = rsrc->start - offset;
+       *end = rsrc->end - offset;
 }
 
-void __init
-pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
-                 int flags, char *name)
+void __init pci_init_resource(struct resource *res, resource_size_t start,
+                             resource_size_t end, int flags, char *name)
 {
        res->start = start;
        res->end = end;
index 785e61c9a810092d13e72f23779d754802aa1667..a541b42c08e343f848d292a1ebfda4a0e82d5090 100644 (file)
@@ -129,13 +129,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
-{
-       q->activity_fn = fn;
-       q->activity_data = data;
-}
-EXPORT_SYMBOL(blk_queue_activity_fn);
-
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
  * @q:         queue
@@ -238,8 +231,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-
-       blk_queue_activity_fn(q, NULL, NULL);
 }
 
 EXPORT_SYMBOL(blk_queue_make_request);
@@ -2696,9 +2687,6 @@ static inline void add_request(request_queue_t * q, struct request * req)
 {
        drive_stat_acct(req, req->nr_sectors, 1);
 
-       if (q->activity_fn)
-               q->activity_fn(q->activity_data, rq_data_dir(req));
-
        /*
         * elevator indicated where it wants this request to be
         * inserted at elevator_merge time
index b3e210723a71f637605852b7d8136ef594af95d2..f322b6a441d82fbe7db9a177ac0837fc54618cf2 100644 (file)
@@ -228,6 +228,7 @@ static int sg_io(struct file *file, request_queue_t *q,
        struct request *rq;
        char sense[SCSI_SENSE_BUFFERSIZE];
        unsigned char cmd[BLK_MAX_CDB];
+       struct bio *bio;
 
        if (hdr->interface_id != 'S')
                return -EINVAL;
@@ -270,13 +271,6 @@ static int sg_io(struct file *file, request_queue_t *q,
 
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
-       /*
-        * bounce this after holding a reference to the original bio, it's
-        * needed for proper unmapping
-        */
-       if (rq->bio)
-               blk_queue_bounce(q, &rq->bio);
-
        rq->timeout = jiffies_to_msecs(hdr->timeout);
        if (!rq->timeout)
                rq->timeout = q->sg_timeout;
@@ -308,6 +302,7 @@ static int sg_io(struct file *file, request_queue_t *q,
        if (ret)
                goto out;
 
+       bio = rq->bio;
        rq->retries = 0;
 
        start_time = jiffies;
@@ -338,6 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
                        hdr->sb_len_wr = len;
        }
 
+       rq->bio = bio;
        if (blk_rq_unmap_user(rq))
                ret = -EFAULT;
 
index 2dfe7f170b4893bc6b5bdf5870c9ac3b77b89c9d..15eab9db9be410dfe12d990b4bdc2b417e798050 100644 (file)
@@ -24,7 +24,7 @@
 
 #define SHA384_DIGEST_SIZE 48
 #define SHA512_DIGEST_SIZE 64
-#define SHA384_HMAC_BLOCK_SIZE  96
+#define SHA384_HMAC_BLOCK_SIZE 128
 #define SHA512_HMAC_BLOCK_SIZE 128
 
 struct sha512_ctx {
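The corrected constant follows from SHA-384 using the same 1024-bit message block as SHA-512, so for HMAC both hashes pad the key to the same block size:

	1024 bits / 8 = 128 bytes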
index a165b716771408c589b53e0aa074b03347e2e252..fc0ae5eb05d81c5c03e708d226ec1bbb3f2c25aa 100644 (file)
@@ -2,4 +2,4 @@
 fore200e_mkfirm
 fore200e_pca_fw.c
 pca200e.bin
-
+pca200e_ecd.bin2
index dad9c47ebb69bb0aa667689f701301a34118c115..5a5c565a32a8230959318ebe4e0e7b8c78093af1 100644 (file)
@@ -1000,10 +1000,6 @@ static int ide_init_queue(ide_drive_t *drive)
        /* needs drive->queue to be set */
        ide_toggle_bounce(drive, 1);
 
-       /* enable led activity for disk drives only */
-       if (drive->media == ide_disk && hwif->led_act)
-               blk_queue_activity_fn(q, hwif->led_act, drive);
-
        return 0;
 }
 
index 92bf9a1f1e2cbf71668484c1ad4307f0029b34ad..0f3fba7ea6fe1ba74e81c7555174f1b3de64b4f2 100644 (file)
@@ -50,6 +50,11 @@ MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
 #define PDEBUG(level, fmt, args...) \
                if (i2c_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args)
 
+static int usbvision_i2c_write(void *data, unsigned char addr, char *buf,
+                           short len);
+static int usbvision_i2c_read(void *data, unsigned char addr, char *buf,
+                          short len);
+
 static inline int try_write_address(struct i2c_adapter *i2c_adap,
                                    unsigned char addr, int retries)
 {
@@ -61,7 +66,7 @@ static inline int try_write_address(struct i2c_adapter *i2c_adap,
        data = i2c_get_adapdata(i2c_adap);
        buf[0] = 0x00;
        for (i = 0; i <= retries; i++) {
-               ret = (adap->outb(data, addr, buf, 1));
+               ret = (usbvision_i2c_write(data, addr, buf, 1));
                if (ret == 1)
                        break;  /* success! */
                udelay(5 /*adap->udelay */ );
@@ -86,7 +91,7 @@ static inline int try_read_address(struct i2c_adapter *i2c_adap,
 
        data = i2c_get_adapdata(i2c_adap);
        for (i = 0; i <= retries; i++) {
-               ret = (adap->inb(data, addr, buf, 1));
+               ret = (usbvision_i2c_read(data, addr, buf, 1));
                if (ret == 1)
                        break;  /* success! */
                udelay(5 /*adap->udelay */ );
@@ -153,7 +158,6 @@ static int
 usb_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
 {
        struct i2c_msg *pmsg;
-       struct i2c_algo_usb_data *adap = i2c_adap->algo_data;
        void *data;
        int i, ret;
        unsigned char addr;
@@ -170,13 +174,13 @@ usb_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
 
                if (pmsg->flags & I2C_M_RD) {
                        /* read bytes into buffer */
-                       ret = (adap->inb(data, addr, pmsg->buf, pmsg->len));
+                       ret = (usbvision_i2c_read(data, addr, pmsg->buf, pmsg->len));
                        if (ret < pmsg->len) {
                                return (ret < 0) ? ret : -EREMOTEIO;
                        }
                } else {
                        /* write bytes from buffer */
-                       ret = (adap->outb(data, addr, pmsg->buf, pmsg->len));
+                       ret = (usbvision_i2c_write(data, addr, pmsg->buf, pmsg->len));
                        if (ret < pmsg->len) {
                                return (ret < 0) ? ret : -EREMOTEIO;
                        }
index 931028f672de0eb610622895927db75b3bc23d57..35ad5cff18e65936862c20276de4468189a12568 100644 (file)
@@ -2131,14 +2131,15 @@ static int rtl8139_poll(struct net_device *dev, int *budget)
        }
 
        if (done) {
+               unsigned long flags;
                /*
                 * Order is important since data can get interrupted
                 * again when we think we are done.
                 */
-               local_irq_disable();
+               local_irq_save(flags);
                RTL_W16_F(IntrMask, rtl8139_intr_mask);
                __netif_rx_complete(dev);
-               local_irq_enable();
+               local_irq_restore(flags);
        }
        spin_unlock(&tp->rx_lock);
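
The generic pattern being adopted above, shown on its own (a sketch, not specific to this driver): local_irq_save()/local_irq_restore() put the interrupt state back exactly as the caller had it, whereas the replaced local_irq_disable()/local_irq_enable() pair would unconditionally re-enable interrupts even if the caller already had them disabled:

static void demo_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* record the current state, then disable */
	/* ... touch hardware / complete the poll atomically ... */
	local_irq_restore(flags);	/* back to the recorded state, not
					 * necessarily "enabled" */
}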
 
index 8a83db0fb3b75c6be9232aed06153e08897c543c..153b6dc80af4c38bebcca28a4cf8afc6a4209124 100644 (file)
@@ -1177,7 +1177,7 @@ static void baycom_probe(struct net_device *dev)
        dev->mtu = AX25_DEF_PACLEN;        /* eth_mtu is the default */
        dev->addr_len = AX25_ADDR_LEN;     /* sizeof an ax.25 address */
        memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
-       memcpy(dev->dev_addr, &ax25_nocall, AX25_ADDR_LEN);
+       memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
        dev->tx_queue_len = 16;
 
        /* New style flags */
index d5ab9cf13257145eb3e43d6f508850f6228f87e6..21f76f51c95e4efd26c4466330e74a63d322f42d 100644 (file)
@@ -382,7 +382,7 @@ config SDLA
 
 # Wan router core.
 config WAN_ROUTER_DRIVERS
-       bool "WAN router drivers"
+       tristate "WAN router drivers"
        depends on WAN && WAN_ROUTER
        ---help---
          Connect LAN to WAN via Linux box.
@@ -393,7 +393,8 @@ config WAN_ROUTER_DRIVERS
          <file:Documentation/networking/wan-router.txt>.
 
          Note that the answer to this question won't directly affect the
-         kernel: saying N will just cause the configurator to skip all
+         kernel except for how subordinate drivers may be built:
+         saying N will just cause the configurator to skip all
          the questions about WAN router drivers.
 
          If unsure, say N.
index b52d547b7a78e38fac23e036170e9444224090f0..8433eb7562cb2c9ceb77e365564e12de90bae8d9 100644 (file)
@@ -1 +1,2 @@
 obj-y += system-bus.o
+obj-$(CONFIG_PS3_VUART) += vuart.o
diff --git a/drivers/ps3/vuart.c b/drivers/ps3/vuart.c
new file mode 100644 (file)
index 0000000..6974f65
--- /dev/null
@@ -0,0 +1,965 @@
+/*
+ *  PS3 virtual uart
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <asm/ps3.h>
+
+#include <asm/lv1call.h>
+#include <asm/bitops.h>
+
+#include "vuart.h"
+
+MODULE_AUTHOR("Sony Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ps3 vuart");
+
+/**
+ * vuart - An inter-partition data link service.
+ *  port 0: PS3 AV Settings.
+ *  port 2: PS3 System Manager.
+ *
+ * The vuart provides a bi-directional byte stream data link between logical
+ * partitions.  Its primary role is as a communications link between the guest
+ * OS and the system policy module.  The current HV does not support any
+ * connections other than those listed.
+ */
+
+enum {PORT_COUNT = 3,};
+
+enum vuart_param {
+       PARAM_TX_TRIGGER = 0,
+       PARAM_RX_TRIGGER = 1,
+       PARAM_INTERRUPT_MASK = 2,
+       PARAM_RX_BUF_SIZE = 3, /* read only */
+       PARAM_RX_BYTES = 4, /* read only */
+       PARAM_TX_BUF_SIZE = 5, /* read only */
+       PARAM_TX_BYTES = 6, /* read only */
+       PARAM_INTERRUPT_STATUS = 7, /* read only */
+};
+
+enum vuart_interrupt_bit {
+       INTERRUPT_BIT_TX = 0,
+       INTERRUPT_BIT_RX = 1,
+       INTERRUPT_BIT_DISCONNECT = 2,
+};
+
+enum vuart_interrupt_mask {
+       INTERRUPT_MASK_TX = 1,
+       INTERRUPT_MASK_RX = 2,
+       INTERRUPT_MASK_DISCONNECT = 4,
+};
+
+/**
+ * struct ports_bmp - bitmap indicating ports needing service.
+ *
+ * A 256 bit read only bitmap indicating ports needing service.  Do not write
+ * to these bits.  Must not cross a page boundary.
+ */
+
+struct ports_bmp {
+       u64 status;
+       u64 unused[3];
+} __attribute__ ((aligned (32)));
+
+/* redefine dev_dbg to do a syntax check */
+
+#if !defined(DEBUG)
+#undef dev_dbg
+static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg(
+       const struct device *_dev, const char *fmt, ...) {return 0;}
+#endif
+
+#define dump_ports_bmp(_b) _dump_ports_bmp(_b, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_ports_bmp(
+       const struct ports_bmp* bmp, const char* func, int line)
+{
+       pr_debug("%s:%d: ports_bmp: %016lxh\n", func, line, bmp->status);
+}
+
+static int ps3_vuart_match_id_to_port(enum ps3_match_id match_id,
+       unsigned int *port_number)
+{
+       switch(match_id) {
+       case PS3_MATCH_ID_AV_SETTINGS:
+               *port_number = 0;
+               return 0;
+       case PS3_MATCH_ID_SYSTEM_MANAGER:
+               *port_number = 2;
+               return 0;
+       default:
+               WARN_ON(1);
+               *port_number = UINT_MAX;
+               return -EINVAL;
+       };
+}
+
+#define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_port_params(unsigned int port_number,
+       const char* func, int line)
+{
+#if defined(DEBUG)
+       static const char *strings[] = {
+               "tx_trigger      ",
+               "rx_trigger      ",
+               "interrupt_mask  ",
+               "rx_buf_size     ",
+               "rx_bytes        ",
+               "tx_buf_size     ",
+               "tx_bytes        ",
+               "interrupt_status",
+       };
+       int result;
+       unsigned int i;
+       u64 value;
+
+       for (i = 0; i < ARRAY_SIZE(strings); i++) {
+               result = lv1_get_virtual_uart_param(port_number, i, &value);
+
+               if (result) {
+                       pr_debug("%s:%d: port_%u: %s failed: %s\n", func, line,
+                               port_number, strings[i], ps3_result(result));
+                       continue;
+               }
+               pr_debug("%s:%d: port_%u: %s = %lxh\n",
+                       func, line, port_number, strings[i], value);
+       }
+#endif
+}
+
+struct vuart_triggers {
+       unsigned long rx;
+       unsigned long tx;
+};
+
+int ps3_vuart_get_triggers(struct ps3_vuart_port_device *dev,
+       struct vuart_triggers *trig)
+{
+       int result;
+       unsigned long size;
+       unsigned long val;
+
+       result = lv1_get_virtual_uart_param(dev->port_number,
+               PARAM_TX_TRIGGER, &trig->tx);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       result = lv1_get_virtual_uart_param(dev->port_number,
+               PARAM_RX_BUF_SIZE, &size);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       result = lv1_get_virtual_uart_param(dev->port_number,
+               PARAM_RX_TRIGGER, &val);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       trig->rx = size - val;
+
+       dev_dbg(&dev->core, "%s:%d: tx %lxh, rx %lxh\n", __func__, __LINE__,
+               trig->tx, trig->rx);
+
+       return result;
+}
+
+int ps3_vuart_set_triggers(struct ps3_vuart_port_device *dev, unsigned int tx,
+       unsigned int rx)
+{
+       int result;
+       unsigned long size;
+
+       result = lv1_set_virtual_uart_param(dev->port_number,
+               PARAM_TX_TRIGGER, tx);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       result = lv1_get_virtual_uart_param(dev->port_number,
+               PARAM_RX_BUF_SIZE, &size);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       result = lv1_set_virtual_uart_param(dev->port_number,
+               PARAM_RX_TRIGGER, size - rx);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       dev_dbg(&dev->core, "%s:%d: tx %xh, rx %xh\n", __func__, __LINE__,
+               tx, rx);
+
+       return result;
+}
+
+static int ps3_vuart_get_rx_bytes_waiting(struct ps3_vuart_port_device *dev,
+       unsigned long *bytes_waiting)
+{
+       int result = lv1_get_virtual_uart_param(dev->port_number,
+               PARAM_RX_BYTES, bytes_waiting);
+
+       if (result)
+               dev_dbg(&dev->core, "%s:%d: rx_bytes failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+
+       dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__,
+               *bytes_waiting);
+       return result;
+}
+
+static int ps3_vuart_set_interrupt_mask(struct ps3_vuart_port_device *dev,
+       unsigned long mask)
+{
+       int result;
+
+       dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__, mask);
+
+       dev->interrupt_mask = mask;
+
+       result = lv1_set_virtual_uart_param(dev->port_number,
+               PARAM_INTERRUPT_MASK, dev->interrupt_mask);
+
+       if (result)
+               dev_dbg(&dev->core, "%s:%d: interrupt_mask failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+
+       return result;
+}
+
+static int ps3_vuart_get_interrupt_mask(struct ps3_vuart_port_device *dev,
+       unsigned long *status)
+{
+       int result = lv1_get_virtual_uart_param(dev->port_number,
+               PARAM_INTERRUPT_STATUS, status);
+
+       if (result)
+               dev_dbg(&dev->core, "%s:%d: interrupt_status failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+
+       dev_dbg(&dev->core, "%s:%d: m %lxh, s %lxh, m&s %lxh\n",
+               __func__, __LINE__, dev->interrupt_mask, *status,
+               dev->interrupt_mask & *status);
+
+       return result;
+}
+
+int ps3_vuart_enable_interrupt_tx(struct ps3_vuart_port_device *dev)
+{
+       return (dev->interrupt_mask & INTERRUPT_MASK_TX) ? 0
+               : ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
+               | INTERRUPT_MASK_TX);
+}
+
+int ps3_vuart_enable_interrupt_rx(struct ps3_vuart_port_device *dev)
+{
+       return (dev->interrupt_mask & INTERRUPT_MASK_RX) ? 0
+               : ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
+               | INTERRUPT_MASK_RX);
+}
+
+int ps3_vuart_enable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
+{
+       return (dev->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
+               : ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
+               | INTERRUPT_MASK_DISCONNECT);
+}
+
+int ps3_vuart_disable_interrupt_tx(struct ps3_vuart_port_device *dev)
+{
+       return (dev->interrupt_mask & INTERRUPT_MASK_TX)
+               ? ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
+               & ~INTERRUPT_MASK_TX) : 0;
+}
+
+int ps3_vuart_disable_interrupt_rx(struct ps3_vuart_port_device *dev)
+{
+       return (dev->interrupt_mask & INTERRUPT_MASK_RX)
+               ? ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
+               & ~INTERRUPT_MASK_RX) : 0;
+}
+
+int ps3_vuart_disable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
+{
+       return (dev->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
+               ? ps3_vuart_set_interrupt_mask(dev, dev->interrupt_mask
+               & ~INTERRUPT_MASK_DISCONNECT) : 0;
+}
+
+/**
+ * ps3_vuart_raw_write - Low level write helper.
+ *
+ * Do not call ps3_vuart_raw_write directly, use ps3_vuart_write.
+ */
+
+static int ps3_vuart_raw_write(struct ps3_vuart_port_device *dev,
+       const void* buf, unsigned int bytes, unsigned long *bytes_written)
+{
+       int result;
+
+       dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);
+
+       result = lv1_write_virtual_uart(dev->port_number,
+               ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_written);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: lv1_write_virtual_uart failed: "
+                       "%s\n", __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       dev->stats.bytes_written += *bytes_written;
+
+       dev_dbg(&dev->core, "%s:%d: wrote %lxh/%xh=>%lxh\n", __func__,
+               __LINE__, *bytes_written, bytes, dev->stats.bytes_written);
+
+       return result;
+}
+
+/**
+ * ps3_vuart_raw_read - Low level read helper.
+ *
+ * Do not call ps3_vuart_raw_read directly, use ps3_vuart_read.
+ */
+
+static int ps3_vuart_raw_read(struct ps3_vuart_port_device *dev, void* buf,
+       unsigned int bytes, unsigned long *bytes_read)
+{
+       int result;
+
+       dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);
+
+       result = lv1_read_virtual_uart(dev->port_number,
+               ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_read);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: lv1_read_virtual_uart failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       dev->stats.bytes_read += *bytes_read;
+
+       dev_dbg(&dev->core, "%s:%d: read %lxh/%xh=>%lxh\n", __func__, __LINE__,
+               *bytes_read, bytes, dev->stats.bytes_read);
+
+       return result;
+}
+
+/**
+ * struct list_buffer - An element for a port device fifo buffer list.
+ */
+
+struct list_buffer {
+       struct list_head link;
+       const unsigned char *head;
+       const unsigned char *tail;
+       unsigned long dbg_number;
+       unsigned char data[];
+};
+
+/**
+ * ps3_vuart_write - the entry point for writing data to a port
+ *
+ * If the port is idle on entry, as much of the incoming data is written to
+ * the port as the port will accept.  Otherwise a list buffer is created
+ * and any remaining incoming data is copied to that buffer.  The buffer is
+ * then enqueued for transmission via the transmit interrupt.
+ */
+
+int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
+       unsigned int bytes)
+{
+       static unsigned long dbg_number;
+       int result;
+       unsigned long flags;
+       struct list_buffer *lb;
+
+       dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
+               bytes, bytes);
+
+       spin_lock_irqsave(&dev->tx_list.lock, flags);
+
+       if (list_empty(&dev->tx_list.head)) {
+               unsigned long bytes_written;
+
+               result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);
+
+               spin_unlock_irqrestore(&dev->tx_list.lock, flags);
+
+               if (result) {
+                       dev_dbg(&dev->core,
+                               "%s:%d: ps3_vuart_raw_write failed\n",
+                               __func__, __LINE__);
+                       return result;
+               }
+
+               if (bytes_written == bytes) {
+                       dev_dbg(&dev->core, "%s:%d: wrote %xh bytes\n",
+                               __func__, __LINE__, bytes);
+                       return 0;
+               }
+
+               bytes -= bytes_written;
+               buf += bytes_written;
+       } else
+               spin_unlock_irqrestore(&dev->tx_list.lock, flags);
+
+       lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_KERNEL);
+
+       if (!lb) {
+               return -ENOMEM;
+       }
+
+       memcpy(lb->data, buf, bytes);
+       lb->head = lb->data;
+       lb->tail = lb->data + bytes;
+       lb->dbg_number = ++dbg_number;
+
+       spin_lock_irqsave(&dev->tx_list.lock, flags);
+       list_add_tail(&lb->link, &dev->tx_list.head);
+       ps3_vuart_enable_interrupt_tx(dev);
+       spin_unlock_irqrestore(&dev->tx_list.lock, flags);
+
+       dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %xh bytes\n",
+               __func__, __LINE__, lb->dbg_number, bytes);
+
+       return 0;
+}
+
+/**
+ * ps3_vuart_read - the entry point for reading data from a port
+ *
+ * If enough bytes to satisfy the request are held in the buffer list, those
+ * bytes are dequeued and copied to the caller's buffer.  Emptied list buffers
+ * are retired.  If the request cannot be satisfied by bytes held in the list
+ * buffers, -EAGAIN is returned.
+ */
+
+int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
+       unsigned int bytes)
+{
+       unsigned long flags;
+       struct list_buffer *lb, *n;
+       unsigned long bytes_read;
+
+       dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
+               bytes, bytes);
+
+       spin_lock_irqsave(&dev->rx_list.lock, flags);
+
+       if (dev->rx_list.bytes_held < bytes) {
+               spin_unlock_irqrestore(&dev->rx_list.lock, flags);
+               dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
+                       __func__, __LINE__, bytes - dev->rx_list.bytes_held);
+               return -EAGAIN;
+       }
+
+       list_for_each_entry_safe(lb, n, &dev->rx_list.head, link) {
+               bytes_read = min((unsigned int)(lb->tail - lb->head), bytes);
+
+               memcpy(buf, lb->head, bytes_read);
+               buf += bytes_read;
+               bytes -= bytes_read;
+               dev->rx_list.bytes_held -= bytes_read;
+
+               if (bytes_read < lb->tail - lb->head) {
+                       lb->head += bytes_read;
+                       spin_unlock_irqrestore(&dev->rx_list.lock, flags);
+
+                       dev_dbg(&dev->core,
+                               "%s:%d: dequeued buf_%lu, %lxh bytes\n",
+                               __func__, __LINE__, lb->dbg_number, bytes_read);
+                       return 0;
+               }
+
+               dev_dbg(&dev->core, "%s:%d free buf_%lu\n", __func__, __LINE__,
+                       lb->dbg_number);
+
+               list_del(&lb->link);
+               kfree(lb);
+       }
+       spin_unlock_irqrestore(&dev->rx_list.lock, flags);
+
+       dev_dbg(&dev->core, "%s:%d: dequeued buf_%lu, %xh bytes\n",
+               __func__, __LINE__, lb->dbg_number, bytes);
+
+       return 0;
+}
+
+/**
+ * ps3_vuart_handle_interrupt_tx - third stage transmit interrupt handler
+ *
+ * Services the transmit interrupt for the port.  Writes as much data from the
+ * buffer list as the port will accept.  Retires any emptied list buffers and
+ * adjusts the final list buffer state for a partial write.
+ */
+
+static int ps3_vuart_handle_interrupt_tx(struct ps3_vuart_port_device *dev)
+{
+       int result = 0;
+       unsigned long flags;
+       struct list_buffer *lb, *n;
+       unsigned long bytes_total = 0;
+
+       dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+
+       spin_lock_irqsave(&dev->tx_list.lock, flags);
+
+       list_for_each_entry_safe(lb, n, &dev->tx_list.head, link) {
+
+               unsigned long bytes_written;
+
+               result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head,
+                       &bytes_written);
+
+               if (result) {
+                       dev_dbg(&dev->core,
+                               "%s:%d: ps3_vuart_raw_write failed\n",
+                               __func__, __LINE__);
+                       break;
+               }
+
+               bytes_total += bytes_written;
+
+               if (bytes_written < lb->tail - lb->head) {
+                       lb->head += bytes_written;
+                       dev_dbg(&dev->core,
+                               "%s:%d cleared buf_%lu, %lxh bytes\n",
+                               __func__, __LINE__, lb->dbg_number,
+                               bytes_written);
+                       goto port_full;
+               }
+
+               dev_dbg(&dev->core, "%s:%d free buf_%lu\n", __func__, __LINE__,
+                       lb->dbg_number);
+
+               list_del(&lb->link);
+               kfree(lb);
+       }
+
+       ps3_vuart_disable_interrupt_tx(dev);
+port_full:
+       spin_unlock_irqrestore(&dev->tx_list.lock, flags);
+       dev_dbg(&dev->core, "%s:%d wrote %lxh bytes total\n",
+               __func__, __LINE__, bytes_total);
+       return result;
+}
+
+/**
+ * ps3_vuart_handle_interrupt_rx - third stage receive interrupt handler
+ *
+ * Services the receive interrupt for the port.  Creates a list buffer and
+ * copies all waiting port data to that buffer and enqueues the buffer in the
+ * buffer list.  Buffer list data is dequeued via ps3_vuart_read.
+ */
+
+static int ps3_vuart_handle_interrupt_rx(struct ps3_vuart_port_device *dev)
+{
+       static unsigned long dbg_number;
+       int result = 0;
+       unsigned long flags;
+       struct list_buffer *lb;
+       unsigned long bytes;
+
+       dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+
+       result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
+
+       if (result)
+               return -EIO;
+
+       BUG_ON(!bytes);
+
+       /* add some extra space for recently arrived data */
+
+       bytes += 128;
+
+       lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
+
+       if (!lb)
+               return -ENOMEM;
+
+       ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
+
+       lb->head = lb->data;
+       lb->tail = lb->data + bytes;
+       lb->dbg_number = ++dbg_number;
+
+       spin_lock_irqsave(&dev->rx_list.lock, flags);
+       list_add_tail(&lb->link, &dev->rx_list.head);
+       dev->rx_list.bytes_held += bytes;
+       spin_unlock_irqrestore(&dev->rx_list.lock, flags);
+
+       dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %lxh bytes\n",
+               __func__, __LINE__, lb->dbg_number, bytes);
+
+       return 0;
+}
+
+static int ps3_vuart_handle_interrupt_disconnect(
+       struct ps3_vuart_port_device *dev)
+{
+       dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+       BUG_ON("no support");
+       return -1;
+}
+
+/**
+ * ps3_vuart_handle_port_interrupt - second stage interrupt handler
+ *
+ * Services any pending interrupt types for the port.  Passes control to the
+ * third stage type specific interrupt handler.  Returns control to the first
+ * stage handler after one iteration.
+ */
+
+static int ps3_vuart_handle_port_interrupt(struct ps3_vuart_port_device *dev)
+{
+       int result;
+       unsigned long status;
+
+       result = ps3_vuart_get_interrupt_mask(dev, &status);
+
+       if (result)
+               return result;
+
+       dev_dbg(&dev->core, "%s:%d: status: %lxh\n", __func__, __LINE__,
+               status);
+
+       if (status & INTERRUPT_MASK_DISCONNECT) {
+               dev->stats.disconnect_interrupts++;
+               result = ps3_vuart_handle_interrupt_disconnect(dev);
+               if (result)
+                       ps3_vuart_disable_interrupt_disconnect(dev);
+       }
+
+       if (status & INTERRUPT_MASK_TX) {
+               dev->stats.tx_interrupts++;
+               result = ps3_vuart_handle_interrupt_tx(dev);
+               if (result)
+                       ps3_vuart_disable_interrupt_tx(dev);
+       }
+
+       if (status & INTERRUPT_MASK_RX) {
+               dev->stats.rx_interrupts++;
+               result = ps3_vuart_handle_interrupt_rx(dev);
+               if (result)
+                       ps3_vuart_disable_interrupt_rx(dev);
+       }
+
+       return 0;
+}
+
+struct vuart_private {
+       unsigned int in_use;
+       unsigned int virq;
+       struct ps3_vuart_port_device *devices[PORT_COUNT];
+       const struct ports_bmp bmp;
+};
+
+/**
+ * ps3_vuart_irq_handler - first stage interrupt handler
+ *
+ * Loops finding any interrupting port and its associated instance data.
+ * Passes control to the second stage port specific interrupt handler.  Loops
+ * until all outstanding interrupts are serviced.
+ */
+
+static irqreturn_t ps3_vuart_irq_handler(int irq, void *_private)
+{
+       struct vuart_private *private;
+
+       BUG_ON(!_private);
+       private = (struct vuart_private *)_private;
+
+       while (1) {
+               unsigned int port;
+
+               dump_ports_bmp(&private->bmp);
+
+               port = (BITS_PER_LONG - 1) - __ilog2(private->bmp.status);
+
+               if (port == BITS_PER_LONG)
+                       break;
+
+               BUG_ON(port >= PORT_COUNT);
+               BUG_ON(!private->devices[port]);
+
+               ps3_vuart_handle_port_interrupt(private->devices[port]);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int ps3_vuart_match(struct device *_dev, struct device_driver *_drv)
+{
+       int result;
+       struct ps3_vuart_port_driver *drv = to_ps3_vuart_port_driver(_drv);
+       struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
+
+       result = dev->match_id == drv->match_id;
+
+       dev_info(&dev->core, "%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__,
+               __LINE__, dev->match_id, dev->core.bus_id, drv->match_id,
+               drv->core.name, (result ? "match" : "miss"));
+
+       return result;
+}
+
+static struct vuart_private vuart_private;
+
+static int ps3_vuart_probe(struct device *_dev)
+{
+       int result;
+       unsigned long tmp;
+       struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
+       struct ps3_vuart_port_driver *drv =
+               to_ps3_vuart_port_driver(_dev->driver);
+
+       dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+
+       BUG_ON(!drv);
+
+       result = ps3_vuart_match_id_to_port(dev->match_id, &dev->port_number);
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: unknown match_id (%d)\n",
+                       __func__, __LINE__, dev->match_id);
+               result = -EINVAL;
+               goto fail_match;
+       }
+
+       if (vuart_private.devices[dev->port_number]) {
+               dev_dbg(&dev->core, "%s:%d: port busy (%d)\n", __func__,
+                       __LINE__, dev->port_number);
+               result = -EBUSY;
+               goto fail_match;
+       }
+
+       vuart_private.devices[dev->port_number] = dev;
+
+       INIT_LIST_HEAD(&dev->tx_list.head);
+       spin_lock_init(&dev->tx_list.lock);
+       INIT_LIST_HEAD(&dev->rx_list.head);
+       spin_lock_init(&dev->rx_list.lock);
+
+       vuart_private.in_use++;
+       if (vuart_private.in_use == 1) {
+               result = ps3_alloc_vuart_irq((void*)&vuart_private.bmp.status,
+                       &vuart_private.virq);
+
+               if (result) {
+                       dev_dbg(&dev->core,
+                               "%s:%d: ps3_alloc_vuart_irq failed (%d)\n",
+                               __func__, __LINE__, result);
+                       result = -EPERM;
+                       goto fail_alloc_irq;
+               }
+
+               result = request_irq(vuart_private.virq, ps3_vuart_irq_handler,
+                       IRQF_DISABLED, "vuart", &vuart_private);
+
+               if (result) {
+                       dev_info(&dev->core, "%s:%d: request_irq failed (%d)\n",
+                               __func__, __LINE__, result);
+                       goto fail_request_irq;
+               }
+       }
+
+       ps3_vuart_set_interrupt_mask(dev, INTERRUPT_MASK_RX);
+
+       /* clear stale pending interrupts */
+       ps3_vuart_get_interrupt_mask(dev, &tmp);
+
+       ps3_vuart_set_triggers(dev, 1, 1);
+
+       if (drv->probe)
+               result = drv->probe(dev);
+       else {
+               result = 0;
+               dev_info(&dev->core, "%s:%d: no probe method\n", __func__,
+                       __LINE__);
+       }
+
+       if (result) {
+               dev_dbg(&dev->core, "%s:%d: drv->probe failed\n",
+                       __func__, __LINE__);
+               goto fail_probe;
+       }
+
+       return result;
+
+fail_probe:
+fail_request_irq:
+       vuart_private.in_use--;
+       if (!vuart_private.in_use) {
+               ps3_free_vuart_irq(vuart_private.virq);
+               vuart_private.virq = NO_IRQ;
+       }
+fail_alloc_irq:
+fail_match:
+       dev_dbg(&dev->core, "%s:%d failed\n", __func__, __LINE__);
+       return result;
+}
+
+static int ps3_vuart_remove(struct device *_dev)
+{
+       struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
+       struct ps3_vuart_port_driver *drv =
+               to_ps3_vuart_port_driver(_dev->driver);
+
+       dev_dbg(&dev->core, "%s:%d: %s\n", __func__, __LINE__,
+               dev->core.bus_id);
+
+       BUG_ON(vuart_private.in_use < 1);
+
+       if (drv->remove)
+               drv->remove(dev);
+       else
+               dev_dbg(&dev->core, "%s:%d: %s no remove method\n", __func__,
+                       __LINE__, dev->core.bus_id);
+
+       vuart_private.in_use--;
+
+       if (!vuart_private.in_use) {
+               free_irq(vuart_private.virq, &vuart_private);
+               ps3_free_vuart_irq(vuart_private.virq);
+               vuart_private.virq = NO_IRQ;
+       }
+       return 0;
+}
+
+/**
+ * ps3_vuart - The vuart instance.
+ *
+ * The vuart is managed as a bus that port devices connect to.
+ */
+
+struct bus_type ps3_vuart = {
+        .name = "ps3_vuart",
+       .match = ps3_vuart_match,
+       .probe = ps3_vuart_probe,
+       .remove = ps3_vuart_remove,
+};
+
+int __init ps3_vuart_init(void)
+{
+       int result;
+
+       pr_debug("%s:%d:\n", __func__, __LINE__);
+       result = bus_register(&ps3_vuart);
+       BUG_ON(result);
+       return result;
+}
+
+void __exit ps3_vuart_exit(void)
+{
+       pr_debug("%s:%d:\n", __func__, __LINE__);
+       bus_unregister(&ps3_vuart);
+}
+
+core_initcall(ps3_vuart_init);
+module_exit(ps3_vuart_exit);
+
+/**
+ * ps3_vuart_port_release_device - Remove a vuart port device.
+ */
+
+static void ps3_vuart_port_release_device(struct device *_dev)
+{
+       struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
+#if defined(DEBUG)
+       memset(dev, 0xad, sizeof(struct ps3_vuart_port_device));
+#endif
+       kfree(dev);
+}
+
+/**
+ * ps3_vuart_port_device_register - Add a vuart port device.
+ */
+
+int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev)
+{
+       int result;
+       static unsigned int dev_count = 1;
+
+       dev->core.parent = NULL;
+       dev->core.bus = &ps3_vuart;
+       dev->core.release = ps3_vuart_port_release_device;
+
+       snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "vuart_%02x",
+               dev_count++);
+
+       dev_dbg(&dev->core, "%s:%d register\n", __func__, __LINE__);
+
+       result = device_register(&dev->core);
+
+       return result;
+}
+
+EXPORT_SYMBOL_GPL(ps3_vuart_port_device_register);
+
+/**
+ * ps3_vuart_port_driver_register - Add a vuart port device driver.
+ */
+
+int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv)
+{
+       int result;
+
+       pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.name);
+       drv->core.bus = &ps3_vuart;
+       result = driver_register(&drv->core);
+       return result;
+}
+
+EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_register);
+
+/**
+ * ps3_vuart_port_driver_unregister - Remove a vuart port device driver.
+ */
+
+void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv)
+{
+       driver_unregister(&drv->core);
+}
+
+EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_unregister);
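
A caller-side sketch (illustrative only, not from the patch) of the contract documented above for ps3_vuart_read(): reads are all-or-nothing and return -EAGAIN until the rx interrupt has buffered enough bytes, so a simple consumer might poll like this:

/* hypothetical helper; msleep() is from <linux/delay.h> */
static int demo_read_blocking(struct ps3_vuart_port_device *dev,
	void *buf, unsigned int bytes)
{
	int result;

	while ((result = ps3_vuart_read(dev, buf, bytes)) == -EAGAIN)
		msleep(1);	/* wait for the rx interrupt to queue more data */

	return result;
}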
diff --git a/drivers/ps3/vuart.h b/drivers/ps3/vuart.h
new file mode 100644 (file)
index 0000000..28fd89f
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ *  PS3 virtual uart
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_PS3_VUART_H)
+#define _PS3_VUART_H
+
+struct ps3_vuart_stats {
+       unsigned long bytes_written;
+       unsigned long bytes_read;
+       unsigned long tx_interrupts;
+       unsigned long rx_interrupts;
+       unsigned long disconnect_interrupts;
+};
+
+/**
+ * struct ps3_vuart_port_device - a device on a vuart port
+ */
+
+struct ps3_vuart_port_device {
+       enum ps3_match_id match_id;
+       struct device core;
+
+       /* private driver variables */
+       unsigned int port_number;
+       unsigned long interrupt_mask;
+       struct {
+               spinlock_t lock;
+               struct list_head head;
+       } tx_list;
+       struct {
+               unsigned long bytes_held;
+               spinlock_t lock;
+               struct list_head head;
+       } rx_list;
+       struct ps3_vuart_stats stats;
+};
+
+/**
+ * struct ps3_vuart_port_driver - a driver for a device on a vuart port
+ */
+
+struct ps3_vuart_port_driver {
+       enum ps3_match_id match_id;
+       struct device_driver core;
+       int (*probe)(struct ps3_vuart_port_device *);
+       int (*remove)(struct ps3_vuart_port_device *);
+       int (*tx_event)(struct ps3_vuart_port_device *dev);
+       int (*rx_event)(struct ps3_vuart_port_device *dev);
+       int (*disconnect_event)(struct ps3_vuart_port_device *dev);
+       /* int (*suspend)(struct ps3_vuart_port_device *, pm_message_t); */
+       /* int (*resume)(struct ps3_vuart_port_device *); */
+};
+
+int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev);
+int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv);
+void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv);
+int ps3_vuart_write(struct ps3_vuart_port_device *dev,
+       const void* buf, unsigned int bytes);
+int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
+       unsigned int bytes);
+static inline struct ps3_vuart_port_driver *to_ps3_vuart_port_driver(
+       struct device_driver *_drv)
+{
+       return container_of(_drv, struct ps3_vuart_port_driver, core);
+}
+static inline struct ps3_vuart_port_device *to_ps3_vuart_port_device(
+       struct device *_dev)
+{
+       return container_of(_dev, struct ps3_vuart_port_device, core);
+}
+
+int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
+       unsigned int bytes);
+int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
+       unsigned int bytes);
+
+#endif
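
For orientation (not part of the commit): a minimal sketch of a port driver built on the declarations above. The driver name and probe body are made up; the match id is the one vuart.c maps to port 2, and the struct fields and registration call are exactly those declared in this header:

#include <linux/init.h>
#include <linux/module.h>
#include <asm/ps3.h>
#include "vuart.h"

static int demo_probe(struct ps3_vuart_port_device *dev)
{
	static const char greeting[] = "hello";

	/* queue some data; anything the port will not take immediately is
	 * buffered and sent later from the tx interrupt */
	return ps3_vuart_write(dev, greeting, sizeof(greeting));
}

static struct ps3_vuart_port_driver demo_driver = {
	.match_id = PS3_MATCH_ID_SYSTEM_MANAGER,
	.core = { .name = "demo_vuart_drv" },
	.probe = demo_probe,
};

static int __init demo_init(void)
{
	return ps3_vuart_port_driver_register(&demo_driver);
}
module_init(demo_init);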
index eb550b339bb829c1473f4f84ffd271b9617f954a..38f70ac03becfacda7fff453c58d750f1df4f861 100644 (file)
 /*
  *      file system option (superblock flag)
  */
-/* mount time flag to disable journaling to disk */
-#define JFS_NOINTEGRITY 0x00000010
+
+/* directory option */
+#define JFS_UNICODE    0x00000001      /* unicode name */
 
 /* mount time flags for error handling */
 #define JFS_ERR_REMOUNT_RO 0x00000002   /* remount read-only */
 #define JFS_ERR_CONTINUE   0x00000004   /* continue */
 #define JFS_ERR_PANIC      0x00000008   /* panic */
 
+/* Quota support */
 #define        JFS_USRQUOTA    0x00000010
 #define        JFS_GRPQUOTA    0x00000020
 
-/* platform option (conditional compilation) */
-#define JFS_AIX                0x80000000      /* AIX support */
-/*     POSIX name/directory  support */
-
-#define JFS_OS2                0x40000000      /* OS/2 support */
-/*     case-insensitive name/directory support */
-
-#define JFS_DFS                0x20000000      /* DCE DFS LFS support */
-
-#define JFS_LINUX      0x10000000      /* Linux support */
-/*     case-sensitive name/directory support */
-
-/* directory option */
-#define JFS_UNICODE    0x00000001      /* unicode name */
+/* mount time flag to disable journaling to disk */
+#define JFS_NOINTEGRITY 0x00000040
 
 /* commit option */
 #define        JFS_COMMIT      0x00000f00      /* commit option mask */
@@ -61,6 +51,7 @@
 #define        JFS_LAZYCOMMIT  0x00000200      /* lazy commit */
 #define        JFS_TMPFS       0x00000400      /* temporary file system -
                                         * do not log/commit:
+                                        * Never implemented
                                         */
 
 /* log logical volume option */
 #define JFS_SPARSE     0x00020000      /* sparse regular file */
 
 /* DASD Limits         F226941 */
-#define JFS_DASD_ENABLED       0x00040000      /* DASD limits enabled */
-#define        JFS_DASD_PRIME          0x00080000      /* Prime DASD usage on boot */
+#define JFS_DASD_ENABLED 0x00040000    /* DASD limits enabled */
+#define        JFS_DASD_PRIME  0x00080000      /* Prime DASD usage on boot */
 
 /* big endian flag */
-#define        JFS_SWAP_BYTES          0x00100000      /* running on big endian computer */
+#define        JFS_SWAP_BYTES  0x00100000      /* running on big endian computer */
 
 /* Directory index */
-#define JFS_DIR_INDEX          0x00200000      /* Persistent index for */
-                                               /* directory entries    */
+#define JFS_DIR_INDEX  0x00200000      /* Persistent index for */
 
+/* platform options */
+#define JFS_LINUX      0x10000000      /* Linux support */
+#define JFS_DFS                0x20000000      /* DCE DFS LFS support */
+/*     Never implemented */
+
+#define JFS_OS2                0x40000000      /* OS/2 support */
+/*     case-insensitive name/directory support */
+
+#define JFS_AIX                0x80000000      /* AIX support */
+/*     POSIX name/directory support - Never implemented */
 
 /*
  *     buffer cache configuration
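
An inference from the hunks above (the commit message itself is not shown in this view): the old JFS_NOINTEGRITY bit aliased JFS_USRQUOTA, which is why the flag moves to a previously unused bit rather than just being relocated within the file:

/* before this patch: two different options shared one bit */
#define JFS_NOINTEGRITY	0x00000010	/* mount-time "no journaling" flag */
#define JFS_USRQUOTA	0x00000010	/* quota flag: same bit! */

/* after this patch */
#undef  JFS_NOINTEGRITY
#define JFS_NOINTEGRITY	0x00000040	/* no longer aliases the quota flag */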
diff --git a/include/asm-avr32/arch-at32ap/at32ap7000.h b/include/asm-avr32/arch-at32ap/at32ap7000.h
new file mode 100644 (file)
index 0000000..ba85e04
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Pin definitions for AT32AP7000.
+ *
+ * Copyright (C) 2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARCH_AT32AP7000_H__
+#define __ASM_ARCH_AT32AP7000_H__
+
+#define GPIO_PERIPH_A  0
+#define GPIO_PERIPH_B  1
+
+#define NR_GPIO_CONTROLLERS    4
+
+/*
+ * Pin numbers identifying specific GPIO pins on the chip. They can
+ * also be converted to IRQ numbers by passing them through
+ * gpio_to_irq().
+ */
+#define GPIO_PIOA_BASE (0)
+#define GPIO_PIOB_BASE (GPIO_PIOA_BASE + 32)
+#define GPIO_PIOC_BASE (GPIO_PIOB_BASE + 32)
+#define GPIO_PIOD_BASE (GPIO_PIOC_BASE + 32)
+
+#define GPIO_PIN_PA(N) (GPIO_PIOA_BASE + (N))
+#define GPIO_PIN_PB(N) (GPIO_PIOB_BASE + (N))
+#define GPIO_PIN_PC(N) (GPIO_PIOC_BASE + (N))
+#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N))
+
+#endif /* __ASM_ARCH_AT32AP7000_H__ */
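
A brief illustration (not part of the commit) of how these macros compose; the gpio_to_irq() conversion is the one promised by the comment above:

static int demo_pin_to_irq(void)
{
	unsigned int pin = GPIO_PIN_PB(30);	/* GPIO_PIOB_BASE + 30 == 62 */

	return gpio_to_irq(pin);		/* usable with request_irq() */
}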
index a39b3e999f18dca54774979c4f500b17fe785b3d..b120ee030c867b97a46d1d314fe376d0f5fbd2e7 100644 (file)
@@ -21,10 +21,7 @@ void at32_map_usart(unsigned int hw_id, unsigned int line);
 struct platform_device *at32_add_device_usart(unsigned int id);
 
 struct eth_platform_data {
-       u8      valid;
-       u8      mii_phy_addr;
        u8      is_rmii;
-       u8      hw_addr[6];
 };
 struct platform_device *
 at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
index 4d50421262a1e3948ee1de525bd51e0da75306b6..83c690571322af286ec19d6163691c3402efd792 100644 (file)
@@ -7,10 +7,20 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#ifndef __ASM_AVR32_AT32_PORTMUX_H__
-#define __ASM_AVR32_AT32_PORTMUX_H__
+#ifndef __ASM_ARCH_PORTMUX_H__
+#define __ASM_ARCH_PORTMUX_H__
 
-void portmux_set_func(unsigned int portmux_id, unsigned int pin_id,
-                     unsigned int function_id);
+/*
+ * Set up pin multiplexing, called from board init only.
+ *
+ * The following flags determine the initial state of the pin.
+ */
+#define AT32_GPIOF_PULLUP      0x00000001      /* Enable pull-up */
+#define AT32_GPIOF_OUTPUT      0x00000002      /* Enable output driver */
+#define AT32_GPIOF_HIGH                0x00000004      /* Set output high */
+
+void at32_select_periph(unsigned int pin, unsigned int periph,
+                       unsigned long flags);
+void at32_select_gpio(unsigned int pin, unsigned long flags);
 
-#endif /* __ASM_AVR32_AT32_PORTMUX_H__ */
+#endif /* __ASM_ARCH_PORTMUX_H__ */
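
A hypothetical board-init fragment (the pin choices are invented) showing the new calls and flags declared above together with the GPIO_PIN_* macros from at32ap7000.h:

static void __init demo_board_setup_pins(void)
{
	/* hand a pin to peripheral function B, with the internal pull-up on */
	at32_select_periph(GPIO_PIN_PA(6), GPIO_PERIPH_B, AT32_GPIOF_PULLUP);

	/* drive an LED pin as a plain GPIO output, initially high */
	at32_select_gpio(GPIO_PIN_PB(0), AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
}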
index 0580b5d62bba1385073f2b7a7d0218fc78a58f92..5c01e27f0b41d61ac58f3c49e9f2a38aabff1b4c 100644 (file)
@@ -109,7 +109,7 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction direction)
 {
-       dma_cache_sync(cpu_addr, size, direction);
+       dma_cache_sync(dev, cpu_addr, size, direction);
        return virt_to_bus(cpu_addr);
 }
 
@@ -211,7 +211,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
                sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
                virt = page_address(sg[i].page) + sg[i].offset;
-               dma_cache_sync(virt, sg[i].length, direction);
+               dma_cache_sync(dev, virt, sg[i].length, direction);
        }
 
        return nents;
@@ -256,14 +256,14 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction direction)
 {
-       dma_cache_sync(bus_to_virt(dma_handle), size, direction);
+       dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
 {
-       dma_cache_sync(bus_to_virt(dma_handle), size, direction);
+       dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
 }
 
 /**
@@ -286,7 +286,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
        int i;
 
        for (i = 0; i < nents; i++) {
-               dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+               dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, direction);
        }
 }
@@ -298,7 +298,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        int i;
 
        for (i = 0; i < nents; i++) {
-               dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+               dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
                               sg[i].length, direction);
        }
 }
index 55a0152feb08c01246b8a58387a1f0eea0826670..432653d7ae09326dafde8ef0b23504ca27b69300 100644 (file)
@@ -5,6 +5,7 @@
  */
 #include <linux/types.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 
 #define COMPAT_USER_HZ 100
 
index 806213ce31b6cc5447c2f72cec86ec5c73ae49ee..25f0c3f39adf14e0ad7eae21ae03f05477d9af41 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef __ASM_MACH_IP27_IRQ_H
 #define __ASM_MACH_IP27_IRQ_H
 
-#include <asm/sn/arch.h>
-
 /*
  * A hardwired interrupt number is completely stupid for this system - a
  * large configuration might have thousands if not tens of thousands of
index a13b715fd9caf5d028af465941d3f6c1f08ff193..44790fdc5d00d989fbf7c5fbd81d007f3119876d 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef _ASM_MACH_TOPOLOGY_H
 #define _ASM_MACH_TOPOLOGY_H   1
 
-#include <asm/sn/arch.h>
 #include <asm/sn/hub.h>
 #include <asm/mmzone.h>
 
index 51174af6ac5213608f3f85a0ecc90749adabe421..da523de628be98302df8d306f7ddf3666a6907dd 100644 (file)
@@ -18,7 +18,6 @@
 #endif
 
 typedef u64    hubreg_t;
-typedef u64    nic_t;
 
 #define cputonasid(cpu)                        (cpu_data[(cpu)].p_nasid)
 #define cputoslice(cpu)                        (cpu_data[(cpu)].p_slice)
index 15d70ca561870cb9d48db00c7183b9377df0ee0a..82aeb9e322db9bde8a223a9c4ac30ba05478c308 100644 (file)
@@ -61,6 +61,8 @@
 #endif /* CONFIG_SGI_IP35 */
 #endif /* CONFIG_SGI_IP27 || CONFIG_SGI_IP35 */
 
+typedef u64  nic_t;
+
 #define KLCFGINFO_MAGIC        0xbeedbabe
 
 typedef s32 klconf_off_t;
index 1e637381c118db47c3694253bf6bfbdb75fe9a92..703970fb0ec0d5d7bdf6b715c2becb7bbf7bd5f3 100644 (file)
@@ -17,7 +17,6 @@ header-y += ipc.h
 header-y += poll.h
 header-y += shmparam.h
 header-y += sockios.h
-header-y += spu_info.h
 header-y += ucontext.h
 header-y += ioctl.h
 header-y += linkage.h
@@ -37,6 +36,7 @@ unifdef-y += posix_types.h
 unifdef-y += ptrace.h
 unifdef-y += seccomp.h
 unifdef-y += signal.h
+unifdef-y += spu_info.h
 unifdef-y += termios.h
 unifdef-y += types.h
 unifdef-y += unistd.h
index 978b2c7e84eaf25f9d32a7ffb06c8744ed65fdf4..709568879f731e2367009c778465b639c997775a 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-struct bug_entry {
-       unsigned long   bug_addr;
-       long            line;
-       const char      *file;
-       const char      *function;
-};
-
-struct bug_entry *find_bug(unsigned long bugaddr);
-
-/*
- * If this bit is set in the line number it means that the trap
- * is for WARN_ON rather than BUG or BUG_ON.
- */
-#define BUG_WARNING_TRAP       0x1000000
-
 #ifdef CONFIG_BUG
 
+/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
+   sizeof(struct bug_entry), respectively */
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _EMIT_BUG_ENTRY                                \
+       ".section __bug_table,\"a\"\n"          \
+       "2:\t" PPC_LONG "1b, %0\n"              \
+       "\t.short %1, %2\n"                     \
+       ".org 2b+%3\n"                          \
+       ".previous\n"
+#else
+#define _EMIT_BUG_ENTRY                                \
+       ".section __bug_table,\"a\"\n"          \
+       "2:\t" PPC_LONG "1b\n"                  \
+       "\t.short %2\n"                         \
+       ".org 2b+%3\n"                          \
+       ".previous\n"
+#endif
+
 /*
  * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
  * optimisations. However depending on the complexity of the condition
  * some compiler versions may not produce optimal results.
  */
 
-#define BUG() do {                                                      \
-       __asm__ __volatile__(                                            \
-               "1:     twi 31,0,0\n"                                    \
-               ".section __bug_table,\"a\"\n"                           \
-               "\t"PPC_LONG"   1b,%0,%1,%2\n"                           \
-               ".previous"                                              \
-               : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
+#define BUG() do {                                             \
+       __asm__ __volatile__(                                   \
+               "1:     twi 31,0,0\n"                           \
+               _EMIT_BUG_ENTRY                                 \
+               : : "i" (__FILE__), "i" (__LINE__),             \
+                   "i" (0), "i"  (sizeof(struct bug_entry)));  \
+       for(;;) ;                                               \
 } while (0)
 
 #define BUG_ON(x) do {                                         \
@@ -51,23 +54,21 @@ struct bug_entry *find_bug(unsigned long bugaddr);
                        BUG();                                  \
        } else {                                                \
                __asm__ __volatile__(                           \
-               "1:     "PPC_TLNEI"     %0,0\n"                 \
-               ".section __bug_table,\"a\"\n"                  \
-               "\t"PPC_LONG"   1b,%1,%2,%3\n"                  \
-               ".previous"                                     \
-               : : "r" ((long)(x)), "i" (__LINE__),            \
-                   "i" (__FILE__), "i" (__FUNCTION__));        \
+               "1:     "PPC_TLNEI"     %4,0\n"                 \
+               _EMIT_BUG_ENTRY                                 \
+               : : "i" (__FILE__), "i" (__LINE__), "i" (0),    \
+                 "i" (sizeof(struct bug_entry)),               \
+                 "r" ((long)(x)));                             \
        }                                                       \
 } while (0)
 
 #define __WARN() do {                                          \
        __asm__ __volatile__(                                   \
                "1:     twi 31,0,0\n"                           \
-               ".section __bug_table,\"a\"\n"                  \
-               "\t"PPC_LONG"   1b,%0,%1,%2\n"                  \
-               ".previous"                                     \
-               : : "i" (__LINE__ + BUG_WARNING_TRAP),          \
-                   "i" (__FILE__), "i" (__FUNCTION__));        \
+               _EMIT_BUG_ENTRY                                 \
+               : : "i" (__FILE__), "i" (__LINE__),             \
+                 "i" (BUGFLAG_WARNING),                        \
+                 "i" (sizeof(struct bug_entry)));              \
 } while (0)
 
 #define WARN_ON(x) ({                                          \
@@ -77,13 +78,12 @@ struct bug_entry *find_bug(unsigned long bugaddr);
                        __WARN();                               \
        } else {                                                \
                __asm__ __volatile__(                           \
-               "1:     "PPC_TLNEI"     %0,0\n"                 \
-               ".section __bug_table,\"a\"\n"                  \
-               "\t"PPC_LONG"   1b,%1,%2,%3\n"                  \
-               ".previous"                                     \
-               : : "r" (__ret_warn_on),                        \
-                   "i" (__LINE__ + BUG_WARNING_TRAP),          \
-                   "i" (__FILE__), "i" (__FUNCTION__));        \
+               "1:     "PPC_TLNEI"     %4,0\n"                 \
+               _EMIT_BUG_ENTRY                                 \
+               : : "i" (__FILE__), "i" (__LINE__),             \
+                 "i" (BUGFLAG_WARNING),                        \
+                 "i" (sizeof(struct bug_entry)),               \
+                 "r" (__ret_warn_on));                         \
        }                                                       \
        unlikely(__ret_warn_on);                                \
 })
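
For readers decoding the asm: the table entry emitted by _EMIT_BUG_ENTRY lines up with a structure of roughly this shape, inferred from the operand order %0..%3 (the real definition lives in the generic bug header and is not part of this patch):

struct bug_entry {
	unsigned long	bug_addr;	/* "1b": address of the trap instruction */
#ifdef CONFIG_DEBUG_BUGVERBOSE
	const char	*file;		/* %0: __FILE__ */
	unsigned short	line;		/* %1: __LINE__ */
#endif
	unsigned short	flags;		/* %2: 0 or BUGFLAG_WARNING */
};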
index 6fe5c9d4ca3b946e2ad5f348070851b6e039d086..7384b8086b75411e2bc001f60184e344e42b31cf 100644 (file)
@@ -126,6 +126,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTR_NODSISRALIGN           ASM_CONST(0x0000000000100000)
 #define CPU_FTR_PPC_LE                 ASM_CONST(0x0000000000200000)
 #define CPU_FTR_REAL_LE                        ASM_CONST(0x0000000000400000)
+#define CPU_FTR_FPU_UNAVAILABLE                ASM_CONST(0x0000000000800000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -152,6 +153,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTR_PURR                   LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_CELL_TB_BUG            LONG_ASM_CONST(0x0000800000000000)
 #define CPU_FTR_SPURR                  LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_DSCR                   LONG_ASM_CONST(0x0002000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -295,6 +297,9 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTRS_E300  (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
            CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
            CPU_FTR_COMMON)
+#define CPU_FTRS_E300C2        (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
+           CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+           CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
 #define CPU_FTRS_CLASSIC32     (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
            CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
 #define CPU_FTRS_8XX   (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB)
@@ -330,13 +335,14 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-           CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE)
+           CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+           CPU_FTR_DSCR)
 #define CPU_FTRS_POWER6X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
            CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | \
-           CPU_FTR_SPURR | CPU_FTR_REAL_LE)
+           CPU_FTR_SPURR | CPU_FTR_REAL_LE | CPU_FTR_DSCR)
 #define CPU_FTRS_CELL  (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -364,7 +370,8 @@ enum {
            CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
            CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
            CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
-           CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
+           CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
+           CPU_FTRS_CLASSIC32 |
 #else
            CPU_FTRS_GENERIC_32 |
 #endif
@@ -403,7 +410,8 @@ enum {
            CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
            CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
            CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
-           CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
+           CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
+           CPU_FTRS_CLASSIC32 &
 #else
            CPU_FTRS_GENERIC_32 &
 #endif
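
The cputable.h hunks above add CPU_FTRS_E300C2 (with CPU_FTR_FPU_UNAVAILABLE) to both the OR-ed "possible" mask and the AND-ed "always" mask. A minimal sketch of why both masks matter, assuming hypothetical feature names and a cur_cpu_features variable standing in for the real cputable entry: the compiler can fold a feature test to a constant when a bit is set in no selectable CPU or in every selectable CPU.

#include <stdio.h>

#define FTR_FPU_UNAVAILABLE  (1u << 0)
#define FTR_HAS_HIGH_BATS    (1u << 1)
#define FTR_USE_TB           (1u << 2)

#define FTRS_E300    (FTR_USE_TB | FTR_HAS_HIGH_BATS)
#define FTRS_E300C2  (FTR_USE_TB | FTR_HAS_HIGH_BATS | FTR_FPU_UNAVAILABLE)

#define FTRS_POSSIBLE (FTRS_E300 | FTRS_E300C2)   /* union of all CPUs  */
#define FTRS_ALWAYS   (FTRS_E300 & FTRS_E300C2)   /* intersection       */

static unsigned int cur_cpu_features = FTRS_E300; /* chosen at "boot"   */

static inline int has_feature(unsigned int ftr)
{
        if (!(FTRS_POSSIBLE & ftr))     /* no configured CPU has it     */
                return 0;
        if ((FTRS_ALWAYS & ftr) == ftr) /* every configured CPU has it  */
                return 1;
        return (cur_cpu_features & ftr) != 0;
}

int main(void)
{
        printf("timebase:        %d\n", has_feature(FTR_USE_TB));          /* 1 */
        printf("fpu unavailable: %d\n", has_feature(FTR_FPU_UNAVAILABLE)); /* 0 */
        return 0;
}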
index fd4a5f5e33d16588ef4c92e7bdbde279901d5e5b..d7a1bc1551c603086c97a2ee1c85f9ad53803172 100644 (file)
@@ -20,8 +20,7 @@
 #ifndef _ASM_POWERPC_DCR_NATIVE_H
 #define _ASM_POWERPC_DCR_NATIVE_H
 #ifdef __KERNEL__
-
-#include <asm/reg.h>
+#ifndef __ASSEMBLY__
 
 typedef struct {} dcr_host_t;
 
@@ -32,7 +31,41 @@ typedef struct {} dcr_host_t;
 #define dcr_read(host, dcr_n)          mfdcr(dcr_n)
 #define dcr_write(host, dcr_n, value)  mtdcr(dcr_n, value)
 
+/* Device Control Registers */
+void __mtdcr(int reg, unsigned int val);
+unsigned int __mfdcr(int reg);
+#define mfdcr(rn)                                              \
+       ({unsigned int rval;                                    \
+       if (__builtin_constant_p(rn))                           \
+               asm volatile("mfdcr %0," __stringify(rn)        \
+                             : "=r" (rval));                   \
+       else                                                    \
+               rval = __mfdcr(rn);                             \
+       rval;})
+
+#define mtdcr(rn, v)                                           \
+do {                                                           \
+       if (__builtin_constant_p(rn))                           \
+               asm volatile("mtdcr " __stringify(rn) ",%0"     \
+                             : : "r" (v));                     \
+       else                                                    \
+               __mtdcr(rn, v);                                 \
+} while (0)
+
+/* R/W of indirect DCRs make use of standard naming conventions for DCRs */
+#define mfdcri(base, reg)                      \
+({                                             \
+       mtdcr(base ## _CFGADDR, base ## _ ## reg);      \
+       mfdcr(base ## _CFGDATA);                        \
+})
+
+#define mtdcri(base, reg, data)                        \
+do {                                           \
+       mtdcr(base ## _CFGADDR, base ## _ ## reg);      \
+       mtdcr(base ## _CFGDATA, data);          \
+} while (0)
 
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_DCR_NATIVE_H */
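
The mfdcr()/mtdcr() macros moved into dcr-native.h above encode the DCR number as an instruction immediate, so the inline asm form is only usable when that number is a compile-time constant; otherwise the macro falls back to the out-of-line __mfdcr()/__mtdcr() helpers. A minimal userspace sketch of that __builtin_constant_p() dispatch, with fast_read()/slow_read() as hypothetical stand-ins for the asm and the helper:

#include <stdio.h>

static unsigned int regs[16];

static unsigned int slow_read(int rn)        /* runtime lookup path   */
{
        printf("slow path for reg %d\n", rn);
        return regs[rn];
}

#define fast_read(rn)  (regs[(rn)])          /* "immediate" path      */

#define read_reg(rn)                                             \
        (__builtin_constant_p(rn) ? fast_read(rn) : slow_read(rn))

int main(int argc, char **argv)
{
        int n = argc & 3;       /* definitely not a compile-time constant */

        (void)argv;
        regs[1] = 7;
        regs[2] = 42;

        printf("%u\n", read_reg(2));   /* constant reg: inline fast path  */
        printf("%u\n", read_reg(n));   /* variable reg: helper path       */
        return 0;
}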
 
index 473f2c7fd89250c3603cd594f3a4754f9741793a..b66c5e6941f01cabfc128a7b0f4082ac71555fae 100644 (file)
@@ -20,6 +20,7 @@
 #ifndef _ASM_POWERPC_DCR_H
 #define _ASM_POWERPC_DCR_H
 #ifdef __KERNEL__
+#ifdef CONFIG_PPC_DCR
 
 #ifdef CONFIG_PPC_DCR_NATIVE
 #include <asm/dcr-native.h>
@@ -38,5 +39,6 @@ extern unsigned int dcr_resource_len(struct device_node *np,
                                     unsigned int index);
 #endif /* CONFIG_PPC_MERGE */
 
+#endif /* CONFIG_PPC_DCR */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_DCR_H */
index d604863d72fb08b9b69a757fe20f117edb8e3cae..9e4dd98eb220ca65a409ffe475bc59a762f84f55 100644 (file)
@@ -107,25 +107,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 
 #endif /* CONFIG_PPC64 */
 
-#define mask_irq(irq)                                          \
-       ({                                                      \
-               irq_desc_t *desc = get_irq_desc(irq);           \
-               if (desc->chip && desc->chip->disable)  \
-                       desc->chip->disable(irq);               \
-       })
-#define unmask_irq(irq)                                                \
-       ({                                                      \
-               irq_desc_t *desc = get_irq_desc(irq);           \
-               if (desc->chip && desc->chip->enable)   \
-                       desc->chip->enable(irq);                \
-       })
-#define ack_irq(irq)                                           \
-       ({                                                      \
-               irq_desc_t *desc = get_irq_desc(irq);           \
-               if (desc->chip && desc->chip->ack)      \
-                       desc->chip->ack(irq);           \
-       })
-
 /*
  * interrupt-retrigger: should we handle this via lost interrupts and IPIs
  * or should we not care like we do now ? --BenH.
index 584fabfb4f08aa9eccfb688727e109da3d021530..e5f14b13ccf0ff2c11ef8392b6697750608ca919 100644 (file)
@@ -46,8 +46,6 @@ struct mod_arch_specific {
        unsigned int num_bugs;
 };
 
-extern struct bug_entry *module_find_bug(unsigned long bugaddr);
-
 /*
  * Select ELF headers.
  * Make empty section for module_frob_arch_sections to expand.
index 7bb7f90098066f81d3ec216141c21294c26ea2ea..cb02c9d1ef938f0c0d8bcdc9169d22236012c543 100644 (file)
@@ -31,12 +31,12 @@ struct pci_controller {
        int last_busno;
 
        void __iomem *io_base_virt;
-       unsigned long io_base_phys;
+       resource_size_t io_base_phys;
 
        /* Some machines have a non 1:1 mapping of
         * the PCI memory space in the CPU bus space
         */
-       unsigned long pci_mem_offset;
+       resource_size_t pci_mem_offset;
        unsigned long pci_io_size;
 
        struct pci_ops *ops;
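
The switch from unsigned long to resource_size_t above matters on 32-bit platforms with wider physical addressing (e.g. 36-bit PowerPC): an unsigned long silently truncates bus addresses above 4 GiB, a 64-bit resource type does not. A tiny illustration, using uint32_t/uint64_t as stand-ins for unsigned long/resource_size_t:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t phys36 = 0x200000000ULL;    /* 8 GiB, a 36-bit address    */
        uint32_t narrow = (uint32_t)phys36;  /* "unsigned long" on 32-bit  */

        printf("full:      0x%" PRIx64 "\n", phys36);
        printf("truncated: 0x%" PRIx32 "\n", narrow);  /* 0x0, high bits lost */
        return 0;
}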
index 16f13319c7693fbb55cea896bab704885ebd4357..ac656ee6bb193215db3cc9a9c6353726f3963a0e 100644 (file)
@@ -143,8 +143,13 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP  1
 
-#ifdef CONFIG_PPC64
-/* pci_unmap_{single,page} is not a nop, thus... */
+#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
+/*
+ * For 64-bit kernels, pci_unmap_{single,page} is not a nop.
+ * For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and
+ * so on are not nops.
+ * and thus...
+ */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
        dma_addr_t ADDR_NAME;
 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME)                \
@@ -158,6 +163,20 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)          \
        (((PTR)->LEN_NAME) = (VAL))
 
+#else /* 32-bit && coherent */
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME)         (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)        do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME)           (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)  do { } while (0)
+
+#endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */
+
+#ifdef CONFIG_PPC64
+
 /* The PCI address space does not equal the physical memory address
  * space (we have an IOMMU).  The IDE and SCSI device layers use
  * this boolean for bounce buffer decisions.
@@ -172,16 +191,8 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
  */
 #define PCI_DMA_BUS_IS_PHYS     (1)
 
-/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)         (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)        do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME)           (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)  do { } while (0)
-
 #endif /* CONFIG_PPC64 */
-       
+
 extern void pcibios_resource_to_bus(struct pci_dev *dev,
                        struct pci_bus_region *region,
                        struct resource *res);
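
The pci.h hunk above extends the DECLARE_PCI_UNMAP_ADDR()/pci_unmap_addr() pattern to non-coherent 32-bit kernels: when unmap/sync later needs the DMA handle, the macros expand to a real field and real accessors; otherwise they expand to nothing and cost zero bytes per structure. A minimal sketch of that "compile the bookkeeping away" idiom, with NEED_UNMAP_STATE as a hypothetical stand-in for CONFIG_PPC64/CONFIG_NOT_COHERENT_CACHE:

#include <stdio.h>
#include <stdint.h>

#define NEED_UNMAP_STATE 1              /* flip to 0 to drop the state */

#if NEED_UNMAP_STATE
#define DECLARE_UNMAP_ADDR(name)        uint64_t name;
#define unmap_addr(ptr, name)           ((ptr)->name)
#define unmap_addr_set(ptr, name, val)  ((ptr)->name = (val))
#else
#define DECLARE_UNMAP_ADDR(name)
#define unmap_addr(ptr, name)           (0)
#define unmap_addr_set(ptr, name, val)  do { } while (0)
#endif

struct rx_buffer {
        void *data;
        DECLARE_UNMAP_ADDR(dma)         /* present only when needed */
};

int main(void)
{
        struct rx_buffer buf = { 0 };

        unmap_addr_set(&buf, dma, 0x1000);      /* no-op if state dropped */
        printf("sizeof(rx_buffer) = %zu, dma = %llu\n",
               sizeof(buf),
               (unsigned long long)unmap_addr(&buf, dma));
        return 0;
}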
index 6faae7b14d5540f0e5cdcd13b3a31c195d7d980b..a3631b15754ca39bfd49984d631635e56b8f8cf2 100644 (file)
 
 /* Special Purpose Registers (SPRNs)*/
 #define SPRN_CTR       0x009   /* Count Register */
+#define SPRN_DSCR      0x11
 #define SPRN_CTRLF     0x088
 #define SPRN_CTRLT     0x098
 #define   CTRL_CT      0xc0000000      /* current thread */
 #define SPRN_TBRU      0x10D   /* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL      0x11C   /* Time Base Lower Register (super, R/W) */
 #define SPRN_TBWU      0x11D   /* Time Base Upper Register (super, R/W) */
+#define SPRN_SPURR     0x134   /* Scaled PURR */
 #define SPRN_HIOR      0x137   /* 970 Hypervisor interrupt offset */
 #define SPRN_DBAT0L    0x219   /* Data BAT 0 Lower Register */
 #define SPRN_DBAT0U    0x218   /* Data BAT 0 Upper Register */
index 5a0c136c04168f94e8d235a6308702c4cf712c5e..8eaa7b28d9d07d59ad6d2f0a6cb83187380ec8b3 100644 (file)
@@ -159,6 +159,7 @@ extern struct rtas_t rtas;
 
 extern void enter_rtas(unsigned long);
 extern int rtas_token(const char *service);
+extern int rtas_service_present(const char *service);
 extern int rtas_call(int token, int, int, int *, ...);
 extern void rtas_restart(char *cmd);
 extern void rtas_power_off(void);
@@ -221,8 +222,6 @@ extern int rtas_get_error_log_max(void);
 extern spinlock_t rtas_data_buf_lock;
 extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
 
-extern void rtas_stop_self(void);
-
 /* RMO buffer reserved for user-space RTAS use */
 extern unsigned long rtas_rmo_buf;
 
index 6c955d0c1ef0d217199f30ea358dd1f03cf45e31..4d35b844bc58e8fd7e110bc3e620b65e691bf631 100644 (file)
@@ -20,8 +20,8 @@ extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
 extern struct pci_controller* pcibios_alloc_controller(void);
 
 /* Helper function for setting up resources */
-extern void pci_init_resource(struct resource *res, unsigned long start,
-                             unsigned long end, int flags, char *name);
+extern void pci_init_resource(struct resource *res, resource_size_t start,
+                             resource_size_t end, int flags, char *name);
 
 /* Get the PCI host controller for a bus */
 extern struct pci_controller* pci_bus_to_hose(int bus);
@@ -50,12 +50,12 @@ struct pci_controller {
        int bus_offset;
 
        void __iomem *io_base_virt;
-       unsigned long io_base_phys;
+       resource_size_t io_base_phys;
 
        /* Some machines (PReP) have a non 1:1 mapping of
         * the PCI memory space in the CPU bus space
         */
-       unsigned long pci_mem_offset;
+       resource_size_t pci_mem_offset;
 
        struct pci_ops *ops;
        volatile unsigned int __iomem *cfg_addr;
index 11ffaaa5da165abe9adec4d586eba35864fa71dc..9d162028dab92809d4d6b49bc9fce254ccc3932b 100644 (file)
@@ -61,6 +61,27 @@ extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
  */
 #define PCI_DMA_BUS_IS_PHYS     (1)
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/*
+ * pci_unmap_{page,single} are NOPs but pci_dma_sync_single_for_cpu()
+ * and so on are not, so...
+ */
+
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
+       dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)                \
+       __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME)                 \
+       ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)                \
+       (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME)                   \
+       ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)          \
+       (((PTR)->LEN_NAME) = (VAL))
+
+#else /* coherent */
+
 /* pci_unmap_{page,single} is a nop so... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
@@ -69,6 +90,8 @@ extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
 #define pci_unmap_len(PTR, LEN_NAME)           (0)
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)  do { } while (0)
 
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
                                        enum pci_dma_burst_strategy *strat,
index 602fbadeaf480cff1769496e9e37e3796356e7fa..a263fc1e65c41c6990cfc3f0a3ff93629c557778 100644 (file)
@@ -9,41 +9,9 @@
 #ifndef __ASM_PPC_REG_BOOKE_H__
 #define __ASM_PPC_REG_BOOKE_H__
 
-#ifndef __ASSEMBLY__
-/* Device Control Registers */
-void __mtdcr(int reg, unsigned int val);
-unsigned int __mfdcr(int reg);
-#define mfdcr(rn)                                              \
-       ({unsigned int rval;                                    \
-       if (__builtin_constant_p(rn))                           \
-               asm volatile("mfdcr %0," __stringify(rn)        \
-                             : "=r" (rval));                   \
-       else                                                    \
-               rval = __mfdcr(rn);                             \
-       rval;})
-
-#define mtdcr(rn, v)                                           \
-do {                                                           \
-       if (__builtin_constant_p(rn))                           \
-               asm volatile("mtdcr " __stringify(rn) ",%0"     \
-                             : : "r" (v));                     \
-       else                                                    \
-               __mtdcr(rn, v);                                 \
-} while (0)
-
-/* R/W of indirect DCRs make use of standard naming conventions for DCRs */
-#define mfdcri(base, reg)                      \
-({                                             \
-       mtdcr(base ## _CFGADDR, base ## _ ## reg);      \
-       mfdcr(base ## _CFGDATA);                        \
-})
-
-#define mtdcri(base, reg, data)                        \
-do {                                           \
-       mtdcr(base ## _CFGADDR, base ## _ ## reg);      \
-       mtdcr(base ## _CFGDATA, data);          \
-} while (0)
+#include <asm/dcr.h>
 
+#ifndef __ASSEMBLY__
 /* Performance Monitor Registers */
 #define mfpmr(rn)      ({unsigned int rval; \
                        asm volatile("mfpmr %0," __stringify(rn) \
index e1c7286165ffa35c1a47e91b5b0abe56e99091ea..ea330d7b46c089f28f1b4c4f24e81af99b09150b 100644 (file)
@@ -342,7 +342,6 @@ typedef void (unplug_fn) (request_queue_t *);
 
 struct bio_vec;
 typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
@@ -384,7 +383,6 @@ struct request_queue
        prep_rq_fn              *prep_rq_fn;
        unplug_fn               *unplug_fn;
        merge_bvec_fn           *merge_bvec_fn;
-       activity_fn             *activity_fn;
        issue_flush_fn          *issue_flush_fn;
        prepare_flush_fn        *prepare_flush_fn;
        softirq_done_fn         *softirq_done_fn;
@@ -411,8 +409,6 @@ struct request_queue
         */
        void                    *queuedata;
 
-       void                    *activity_data;
-
        /*
         * queue needs bounce pages for pages above this limit
         */
@@ -677,7 +673,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
-extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
 extern int blk_rq_unmap_user(struct request *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
index ed6cc8962d87b9e8c50df5059b6c475450cf1a80..1cb054bd93f2805fcb1ee88e966d75b3e498423e 100644 (file)
@@ -176,20 +176,20 @@ enum {
 };
 
 /* DCCP features (RFC 4340 section 6.4) */
- enum {
-       DCCPF_RESERVED = 0,
-       DCCPF_CCID = 1,
+enum {
+       DCCPF_RESERVED = 0,
+       DCCPF_CCID = 1,
        DCCPF_SHORT_SEQNOS = 2,         /* XXX: not yet implemented */
-       DCCPF_SEQUENCE_WINDOW = 3,
+       DCCPF_SEQUENCE_WINDOW = 3,
        DCCPF_ECN_INCAPABLE = 4,        /* XXX: not yet implemented */
-       DCCPF_ACK_RATIO = 5,
-       DCCPF_SEND_ACK_VECTOR = 6,
-       DCCPF_SEND_NDP_COUNT = 7,
+       DCCPF_ACK_RATIO = 5,
+       DCCPF_SEND_ACK_VECTOR = 6,
+       DCCPF_SEND_NDP_COUNT = 7,
        DCCPF_MIN_CSUM_COVER = 8,
        DCCPF_DATA_CHECKSUM = 9,        /* XXX: not yet implemented */
-       /* 10-127 reserved */
-       DCCPF_MIN_CCID_SPECIFIC = 128,
-       DCCPF_MAX_CCID_SPECIFIC = 255,
+       /* 10-127 reserved */
+       DCCPF_MIN_CCID_SPECIFIC = 128,
+       DCCPF_MAX_CCID_SPECIFIC = 255,
 };
 
 /* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
@@ -427,7 +427,7 @@ struct dccp_service_list {
 };
 
 #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
-#define DCCP_SERVICE_CODE_IS_ABSENT             0
+#define DCCP_SERVICE_CODE_IS_ABSENT            0
 
 static inline int dccp_list_has_service(const struct dccp_service_list *sl,
                                        const __be32 service)
@@ -436,7 +436,7 @@ static inline int dccp_list_has_service(const struct dccp_service_list *sl,
                u32 i = sl->dccpsl_nr;
                while (i--)
                        if (sl->dccpsl_list[i] == service)
-                               return 1; 
+                               return 1;
        }
        return 0;
 }
@@ -511,7 +511,7 @@ struct dccp_sock {
        __u8                            dccps_hc_tx_insert_options:1;
        struct timer_list               dccps_xmit_timer;
 };
+
 static inline struct dccp_sock *dccp_sk(const struct sock *sk)
 {
        return (struct dccp_sock *)sk;
index 3da29e2d524a57a8bc5fe80cc9942da561c1f32b..abb64c437f6fd92b447deb18768549d7cbb45ddc 100644 (file)
@@ -19,6 +19,7 @@
 #define _FSL_DEVICE_H_
 
 #include <linux/types.h>
+#include <linux/phy.h>
 
 /*
  * Some conventions on how we handle peripherals on Freescale chips
index 64e070f62a872bbf22cc376bfe9ef51d6499a4ac..e26a03981a94a0abc9f4ea59e68ae3cf27967238 100644 (file)
@@ -804,8 +804,6 @@ typedef struct hwif_s {
        void            *hwif_data;     /* extra hwif data */
 
        unsigned dma;
-
-       void (*led_act)(void *data, int rw);
 } ____cacheline_internodealigned_in_smp ide_hwif_t;
 
 /*
index 46000936f8f1d5f39e92aa1e260518475b567e1a..6b0648cfdffcf869ee59784db66e81ae8991a382 100644 (file)
@@ -44,8 +44,11 @@ typedef struct {
 #define SEQLOCK_UNLOCKED \
                 __SEQLOCK_UNLOCKED(old_style_seqlock_init)
 
-#define seqlock_init(x) \
-               do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)
+#define seqlock_init(x)                                        \
+       do {                                            \
+               (x)->sequence = 0;                      \
+               spin_lock_init(&(x)->lock);             \
+       } while (0)
 
 #define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)
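
The new seqlock_init() above initialises the members directly (sequence to zero, spinlock via spin_lock_init) instead of assigning a whole compound literal. As a minimal userspace analog of what is being initialised and how a seqlock is used, assuming C11 atomics and a pthread mutex standing in for the kernel spinlock (this is an illustration, not the kernel implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_seqlock {
        atomic_uint     sequence;       /* odd while a write is in progress */
        pthread_mutex_t lock;           /* serialises writers               */
};

struct pair {
        atomic_int a;
        atomic_int b;
};

static void demo_seqlock_init(struct demo_seqlock *sl)
{
        /* mirrors the new seqlock_init(): zero the sequence, init the lock */
        atomic_init(&sl->sequence, 0);
        pthread_mutex_init(&sl->lock, NULL);
}

static void write_pair(struct demo_seqlock *sl, struct pair *p, int v)
{
        pthread_mutex_lock(&sl->lock);
        atomic_fetch_add(&sl->sequence, 1);     /* sequence becomes odd  */
        atomic_store(&p->a, v);
        atomic_store(&p->b, -v);
        atomic_fetch_add(&sl->sequence, 1);     /* sequence even again   */
        pthread_mutex_unlock(&sl->lock);
}

static void read_pair(struct demo_seqlock *sl, struct pair *p, int *a, int *b)
{
        unsigned int seq;

        do {                                    /* retry if a write raced */
                seq = atomic_load(&sl->sequence);
                *a  = atomic_load(&p->a);
                *b  = atomic_load(&p->b);
        } while ((seq & 1) || seq != atomic_load(&sl->sequence));
}

int main(void)
{
        struct demo_seqlock sl;
        struct pair p;
        int a, b;

        demo_seqlock_init(&sl);
        atomic_init(&p.a, 0);
        atomic_init(&p.b, 0);

        write_pair(&sl, &p, 5);
        read_pair(&sl, &p, &a, &b);
        printf("a=%d b=%d\n", a, b);            /* a=5 b=-5 */
        return 0;
}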
index 31a9b25276fe02d5d951f243b863c735cc8fd3e4..8a8462b4a4ddcdf18dfc4fef04f135f23b02f786 100644 (file)
@@ -37,10 +37,14 @@ struct tfrc_rx_info {
  *     @tfrctx_p:      current loss event rate (5.4)
  *     @tfrctx_rto:    estimate of RTO, equals 4*RTT (4.3)
  *     @tfrctx_ipi:    inter-packet interval (4.6)
+ *
+ *  Note: X and X_recv are both maintained in units of 64 * bytes/second. This
+ *        enables a finer resolution of sending rates and avoids problems with
+ *        integer arithmetic; u32 is not sufficient as scaling consumes 6 bits.
  */
 struct tfrc_tx_info {
-       __u32 tfrctx_x;
-       __u32 tfrctx_x_recv;
+       __u64 tfrctx_x;
+       __u64 tfrctx_x_recv;
        __u32 tfrctx_x_calc;
        __u32 tfrctx_rtt;
        __u32 tfrctx_p;
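
The tfrc_tx_info note above explains that X and X_recv are kept in units of 64 * bytes/second, i.e. with six fractional bits. A minimal sketch of why that helps: slow rates and repeated halving do not collapse to zero in integer arithmetic. scaled_div() here is a plain stand-in, not the kernel helper.

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

/* bytes transferred in 'usecs' microseconds -> (bytes/sec) << 6 */
static uint64_t scaled_div(uint64_t bytes, uint64_t usecs)
{
        return ((bytes * USEC_PER_SEC) << 6) / usecs;
}

int main(void)
{
        /* 3 bytes in 2 seconds: 1.5 bytes/s, representable as 96/64 */
        uint64_t x = scaled_div(3, 2 * USEC_PER_SEC);

        printf("X scaled   = %llu (units of 1/64 byte/s)\n",
               (unsigned long long)x);                          /* 96 */
        printf("X unscaled = %llu byte/s (truncated)\n",
               (unsigned long long)(x >> 6));                   /* 1  */

        /* halving the rate keeps sub-byte/s precision */
        x /= 2;
        printf("X/2 scaled = %llu -> %llu byte/s\n",
               (unsigned long long)x, (unsigned long long)(x >> 6));
        return 0;
}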
index e1d116f1192359d2c037d74693200ac374adeb1a..14b72d868f03eeb479cd631f92b5caa2fb9f3216 100644 (file)
@@ -285,6 +285,8 @@ extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
 extern const ax25_address ax25_bcast;
 extern const ax25_address ax25_defaddr;
 extern const ax25_address null_ax25_address;
+extern char *ax2asc(char *buf, const ax25_address *);
+extern void asc2ax(ax25_address *addr, const char *callsign);
 extern int ax25cmp(const ax25_address *, const ax25_address *);
 extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
 extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
index 21a0616152fc4d56b72ded295163eeb93c02a3dd..97a49c79c6050df6caf43543caaff4d8bd07b3fc 100644 (file)
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(ax2asc);
  */
 void asc2ax(ax25_address *addr, const char *callsign)
 {
-       char *s;
+       const char *s;
        int n;
 
        for (s = callsign, n = 0; n < 6; n++) {
index 8a271285f2f378d154d21927790e3a31cefd42a3..a01abdd2d3eaa93dce2414b450df3d2c0b685895 100644 (file)
@@ -242,22 +242,28 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 &&
-           npinfo->poll_owner != smp_processor_id() &&
-           netif_tx_trylock(dev)) {
-               /* try until next clock tick */
-               for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
-                       if (!netif_queue_stopped(dev))
-                               status = dev->hard_start_xmit(skb, dev);
+                   npinfo->poll_owner != smp_processor_id()) {
+               unsigned long flags;
 
-                       if (status == NETDEV_TX_OK)
-                               break;
+               local_irq_save(flags);
+               if (netif_tx_trylock(dev)) {
+                       /* try until next clock tick */
+                       for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+                                       tries > 0; --tries) {
+                               if (!netif_queue_stopped(dev))
+                                       status = dev->hard_start_xmit(skb, dev);
 
-                       /* tickle device maybe there is some cleanup */
-                       netpoll_poll(np);
+                               if (status == NETDEV_TX_OK)
+                                       break;
 
-                       udelay(USEC_PER_POLL);
+                               /* tickle device maybe there is some cleanup */
+                               netpoll_poll(np);
+
+                               udelay(USEC_PER_POLL);
+                       }
+                       netif_tx_unlock(dev);
                }
-               netif_tx_unlock(dev);
+               local_irq_restore(flags);
        }
 
        if (status != NETDEV_TX_OK) {
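
The netpoll hunk above keeps the "try until the next clock tick" loop but now disables local interrupts around the netif_tx_trylock() attempt, and never blocks on the TX lock. A minimal userspace sketch of the bounded-retry shape of that path, with pthreads and usleep() standing in for netif_tx_trylock(), netpoll_poll() and the jiffies arithmetic (the interrupt masking has no direct userspace equivalent and is omitted):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define USEC_PER_POLL   50
#define TICK_USEC       1000            /* pretend one jiffy is 1 ms       */

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_stopped = 2;           /* pretend the queue is busy twice */

static int hard_start_xmit(const char *msg)
{
        if (queue_stopped > 0) {
                queue_stopped--;
                return 0;               /* ~ NETDEV_TX_BUSY */
        }
        printf("sent: %s\n", msg);
        return 1;                       /* ~ NETDEV_TX_OK */
}

static int try_send(const char *msg)
{
        int tries, ok = 0;

        if (pthread_mutex_trylock(&tx_lock) != 0)
                return 0;               /* never block in this context */

        /* try until (roughly) the next clock tick */
        for (tries = TICK_USEC / USEC_PER_POLL; tries > 0; --tries) {
                ok = hard_start_xmit(msg);
                if (ok)
                        break;
                usleep(USEC_PER_POLL);  /* "tickle" the device, then retry */
        }
        pthread_mutex_unlock(&tx_lock);
        return ok;
}

int main(void)
{
        if (!try_send("hello"))
                printf("dropped after one tick\n");
        return 0;
}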
index 1f4727ddbdbfbf1e8d1214ed2604bb4a120306ac..a086c6312d3b220c6695dfdb35bac7ac0fc358dc 100644 (file)
@@ -223,7 +223,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
                        gap = -new_head;
                }
                new_head += DCCP_MAX_ACKVEC_LEN;
-       } 
+       }
 
        av->dccpav_buf_head = new_head;
 
@@ -336,7 +336,7 @@ out_duplicate:
 void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len)
 {
        dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len,
-                                       (unsigned long long)ackno);
+                        (unsigned long long)ackno);
 
        while (len--) {
                const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6;
index bcc2d12ae81ccb13d00e428c85840b37a0ac81ec..c65cb2453e43d7836e80ab25059d3993c5d875ce 100644 (file)
@@ -43,8 +43,6 @@ struct ccid_operations {
                                                    unsigned char* value);
        int             (*ccid_hc_rx_insert_options)(struct sock *sk,
                                                     struct sk_buff *skb);
-       int             (*ccid_hc_tx_insert_options)(struct sock *sk,
-                                                    struct sk_buff *skb);
        void            (*ccid_hc_tx_packet_recv)(struct sock *sk,
                                                  struct sk_buff *skb);
        int             (*ccid_hc_tx_parse_options)(struct sock *sk,
@@ -146,14 +144,6 @@ static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
        return rc;
 }
 
-static inline int ccid_hc_tx_insert_options(struct ccid *ccid, struct sock *sk,
-                                           struct sk_buff *skb)
-{
-       if (ccid->ccid_ops->ccid_hc_tx_insert_options != NULL)
-               return ccid->ccid_ops->ccid_hc_tx_insert_options(sk, skb);
-       return 0;
-}
-
 static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
                                            struct sk_buff *skb)
 {
index 2555be8f4790e4825f861fc97ab502365f5ef786..fd38b05d6f79ec146edda973d2a307653a0c01f8 100644 (file)
@@ -351,7 +351,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
 
                while (seqp != hctx->ccid2hctx_seqh) {
                        ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
-                                      (unsigned long long)seqp->ccid2s_seq,
+                                      (unsigned long long)seqp->ccid2s_seq,
                                       seqp->ccid2s_acked, seqp->ccid2s_sent);
                        seqp = seqp->ccid2s_next;
                }
@@ -473,7 +473,7 @@ static inline void ccid2_new_ack(struct sock *sk,
                /* first measurement */
                if (hctx->ccid2hctx_srtt == -1) {
                        ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
-                                      r, jiffies,
+                                      r, jiffies,
                                       (unsigned long long)seqp->ccid2s_seq);
                        ccid2_change_srtt(hctx, r);
                        hctx->ccid2hctx_rttvar = r >> 1;
@@ -518,8 +518,8 @@ static inline void ccid2_new_ack(struct sock *sk,
                hctx->ccid2hctx_lastrtt = jiffies;
 
                ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
-                              hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
-                              hctx->ccid2hctx_rto, HZ, r);
+                              hctx->ccid2hctx_srtt, hctx->ccid2hctx_rttvar,
+                              hctx->ccid2hctx_rto, HZ, r);
                hctx->ccid2hctx_sent = 0;
        }
 
@@ -667,9 +667,9 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                                /* new packet received or marked */
                                if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
                                    !seqp->ccid2s_acked) {
-                                       if (state ==
+                                       if (state ==
                                            DCCP_ACKVEC_STATE_ECN_MARKED) {
-                                               ccid2_congestion_event(hctx,
+                                               ccid2_congestion_event(hctx,
                                                                       seqp);
                                        } else
                                                ccid2_new_ack(sk, seqp,
index 66a27b9688ca9240579ce897df70637b8698eb18..fa6b75372ed701d114ea7f9b9c8596ee5f34f97e 100644 (file)
 #include "lib/tfrc.h"
 #include "ccid3.h"
 
-/*
- * Reason for maths here is to avoid 32 bit overflow when a is big.
- * With this we get close to the limit.
- */
-static u32 usecs_div(const u32 a, const u32 b)
-{
-       const u32 div = a < (UINT_MAX / (USEC_PER_SEC /    10)) ?    10 :
-                       a < (UINT_MAX / (USEC_PER_SEC /    50)) ?    50 :
-                       a < (UINT_MAX / (USEC_PER_SEC /   100)) ?   100 :
-                       a < (UINT_MAX / (USEC_PER_SEC /   500)) ?   500 :
-                       a < (UINT_MAX / (USEC_PER_SEC /  1000)) ?  1000 :
-                       a < (UINT_MAX / (USEC_PER_SEC /  5000)) ?  5000 :
-                       a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
-                       a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
-                                                                100000;
-       const u32 tmp = a * (USEC_PER_SEC / div);
-       return (b >= 2 * div) ? tmp / (b / div) : tmp;
-}
-
-
-
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 static int ccid3_debug;
 #define ccid3_pr_debug(format, a...)   DCCP_PR_DEBUG(ccid3_debug, format, ##a)
@@ -108,8 +87,9 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
 {
        timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
 
-       /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
-       hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x);
+       /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
+       hctx->ccid3hctx_t_ipi = scaled_div(hctx->ccid3hctx_s,
+                                          hctx->ccid3hctx_x >> 6);
 
        /* Update nominal send time with regard to the new t_ipi */
        timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
@@ -128,40 +108,44 @@ static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
  *          X = max(min(2 * X, 2 * X_recv), s / R);
  *          tld = now;
  *
+ * Note: X and X_recv are both stored in units of 64 * bytes/second, to support
+ *       fine-grained resolution of sending rates. This requires scaling by 2^6
+ *       throughout the code. Only X_calc is unscaled (in bytes/second).
+ *
  * If X has changed, we also update the scheduled send time t_now,
  * the inter-packet interval t_ipi, and the delta value.
- */ 
+ */
 static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
 
 {
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
-       const __u32 old_x = hctx->ccid3hctx_x;
+       const  __u64 old_x = hctx->ccid3hctx_x;
 
        if (hctx->ccid3hctx_p > 0) {
-               hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
-                                                    hctx->ccid3hctx_rtt,
-                                                    hctx->ccid3hctx_p);
-               hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc,
-                                                  hctx->ccid3hctx_x_recv * 2),
-                                              hctx->ccid3hctx_s / TFRC_T_MBI);
-
-       } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >=
-                                                         hctx->ccid3hctx_rtt) {
-               hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv,
-                                           hctx->ccid3hctx_x      ) * 2,
-                                       usecs_div(hctx->ccid3hctx_s,
-                                                 hctx->ccid3hctx_rtt)   );
+
+               hctx->ccid3hctx_x = min(((__u64)hctx->ccid3hctx_x_calc) << 6,
+                                       hctx->ccid3hctx_x_recv * 2);
+               hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
+                                       (((__u64)hctx->ccid3hctx_s) << 6) /
+                                                               TFRC_T_MBI);
+
+       } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) -
+                       (suseconds_t)hctx->ccid3hctx_rtt >= 0) {
+
+               hctx->ccid3hctx_x =
+                       max(2 * min(hctx->ccid3hctx_x, hctx->ccid3hctx_x_recv),
+                           scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
+                                      hctx->ccid3hctx_rtt));
                hctx->ccid3hctx_t_ld = *now;
-       } else
-               ccid3_pr_debug("Not changing X\n");
+       }
 
        if (hctx->ccid3hctx_x != old_x)
                ccid3_update_send_time(hctx);
 }
 
 /*
- *     Track the mean packet size `s' (cf. RFC 4342, 5.3 and  RFC 3448, 4.1)
- *     @len: DCCP packet payload size in bytes
+ *     Track the mean packet size `s' (cf. RFC 4342, 5.3 and  RFC 3448, 4.1)
+ *     @len: DCCP packet payload size in bytes
  */
 static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
 {
@@ -178,6 +162,33 @@ static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
         */
 }
 
+/*
+ *     Update Window Counter using the algorithm from [RFC 4342, 8.1].
+ *     The algorithm is not applicable if RTT < 4 microseconds.
+ */
+static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hctx,
+                                               struct timeval *now)
+{
+       suseconds_t delta;
+       u32 quarter_rtts;
+
+       if (unlikely(hctx->ccid3hctx_rtt < 4))  /* avoid divide-by-zero */
+               return;
+
+       delta = timeval_delta(now, &hctx->ccid3hctx_t_last_win_count);
+       DCCP_BUG_ON(delta < 0);
+
+       quarter_rtts = (u32)delta / (hctx->ccid3hctx_rtt / 4);
+
+       if (quarter_rtts > 0) {
+               hctx->ccid3hctx_t_last_win_count = *now;
+               hctx->ccid3hctx_last_win_count  += min_t(u32, quarter_rtts, 5);
+               hctx->ccid3hctx_last_win_count  &= 0xF;         /* mod 16 */
+
+               ccid3_pr_debug("now at %#X\n", hctx->ccid3hctx_last_win_count);
+       }
+}
+
 static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
 {
        struct sock *sk = (struct sock *)data;
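
The window-counter helper added above implements the update from RFC 4342, 8.1: the 4-bit CCVal counter advances by the number of elapsed quarter-RTTs, by at most 5, modulo 16, and is skipped for RTTs under 4 microseconds. A minimal standalone sketch of the same arithmetic, with plain variables standing in for the socket state:

#include <stdio.h>

static unsigned int win_count;          /* 4-bit CCVal counter          */
static long         t_last_change_us;   /* when it last advanced        */

static void update_win_count(long now_us, unsigned int rtt_us)
{
        unsigned int quarter_rtts;

        if (rtt_us < 4)                 /* avoid divide-by-zero         */
                return;

        quarter_rtts = (unsigned int)(now_us - t_last_change_us) / (rtt_us / 4);
        if (quarter_rtts > 0) {
                t_last_change_us = now_us;
                win_count += (quarter_rtts < 5 ? quarter_rtts : 5);
                win_count &= 0xF;       /* mod 16 */
        }
}

int main(void)
{
        unsigned int rtt = 100000;      /* 100 ms */

        update_win_count(  20000, rtt); /* < 1 quarter-RTT -> unchanged */
        printf("win_count = %u\n", win_count);                  /* 0 */

        update_win_count(  75000, rtt); /* 3 quarter-RTTs -> +3         */
        printf("win_count = %u\n", win_count);                  /* 3 */

        update_win_count(2075000, rtt); /* 20 RTTs -> capped at +5      */
        printf("win_count = %u\n", win_count);                  /* 8 */
        return 0;
}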
@@ -191,20 +202,20 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
                goto restart_timer;
        }
 
-       ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
+       ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk,
                       ccid3_tx_state_name(hctx->ccid3hctx_state));
-       
+
        switch (hctx->ccid3hctx_state) {
        case TFRC_SSTATE_NO_FBACK:
                /* RFC 3448, 4.4: Halve send rate directly */
-               hctx->ccid3hctx_x = min_t(u32, hctx->ccid3hctx_x / 2,
-                                              hctx->ccid3hctx_s / TFRC_T_MBI);
+               hctx->ccid3hctx_x = max(hctx->ccid3hctx_x / 2,
+                                       (((__u64)hctx->ccid3hctx_s) << 6) /
+                                                                   TFRC_T_MBI);
 
-               ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
-                              "bytes/s\n",
-                              dccp_role(sk), sk,
+               ccid3_pr_debug("%s(%p, state=%s), updated tx rate to %u "
+                              "bytes/s\n", dccp_role(sk), sk,
                               ccid3_tx_state_name(hctx->ccid3hctx_state),
-                              hctx->ccid3hctx_x);
+                              (unsigned)(hctx->ccid3hctx_x >> 6));
                /* The value of R is still undefined and so we can not recompute
                 * the timeout value. Keep initial value as per [RFC 4342, 5]. */
                t_nfb = TFRC_INITIAL_TIMEOUT;
@@ -213,34 +224,46 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
        case TFRC_SSTATE_FBACK:
                /*
                 * Check if IDLE since last timeout and recv rate is less than
-                * 4 packets per RTT
+                * 4 packets (in units of 64*bytes/sec) per RTT
                 */
                if (!hctx->ccid3hctx_idle ||
-                   (hctx->ccid3hctx_x_recv >=
-                    4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
+                   (hctx->ccid3hctx_x_recv >= 4 *
+                    scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
+                               hctx->ccid3hctx_rtt))) {
                        struct timeval now;
 
-                       ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
+                       ccid3_pr_debug("%s(%p, state=%s), not idle\n",
                                       dccp_role(sk), sk,
-                                      ccid3_tx_state_name(hctx->ccid3hctx_state));
-                       /* Halve sending rate */
+                                  ccid3_tx_state_name(hctx->ccid3hctx_state));
 
-                       /*  If (p == 0 || X_calc > 2 * X_recv)
+                       /*
+                        *  Modify the cached value of X_recv [RFC 3448, 4.4]
+                        *
+                        *  If (p == 0 || X_calc > 2 * X_recv)
                         *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
                         *  Else
                         *    X_recv = X_calc / 4;
+                        *
+                        *  Note that X_recv is scaled by 2^6 while X_calc is not
                         */
                        BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
 
                        if (hctx->ccid3hctx_p  == 0 ||
-                           hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
-                               hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
-                                                                   hctx->ccid3hctx_s / (2 * TFRC_T_MBI));
-                       else
-                               hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;
-
-                       /* Update sending rate */
-                       dccp_timestamp(sk, &now);
+                           (hctx->ccid3hctx_x_calc >
+                            (hctx->ccid3hctx_x_recv >> 5))) {
+
+                               hctx->ccid3hctx_x_recv =
+                                       max(hctx->ccid3hctx_x_recv / 2,
+                                           (((__u64)hctx->ccid3hctx_s) << 6) /
+                                                         (2 * TFRC_T_MBI));
+
+                               if (hctx->ccid3hctx_p == 0)
+                                       dccp_timestamp(sk, &now);
+                       } else {
+                               hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc;
+                               hctx->ccid3hctx_x_recv <<= 4;
+                       }
+                       /* Now recalculate X [RFC 3448, 4.3, step (4)] */
                        ccid3_hc_tx_update_x(sk, &now);
                }
                /*
@@ -251,7 +274,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
                t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
                break;
        case TFRC_SSTATE_NO_SENT:
-               DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk);
+               DCCP_BUG("%s(%p) - Illegal state NO_SENT", dccp_role(sk), sk);
                /* fall through */
        case TFRC_SSTATE_TERM:
                goto out;
@@ -277,9 +300,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
-       struct dccp_tx_hist_entry *new_packet;
        struct timeval now;
-       long delay;
+       suseconds_t delay;
 
        BUG_ON(hctx == NULL);
 
@@ -291,34 +313,21 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
        if (unlikely(skb->len == 0))
                return -EBADMSG;
 
-       /* See if last packet allocated was not sent */
-       new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
-       if (new_packet == NULL || new_packet->dccphtx_sent) {
-               new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-                                                   GFP_ATOMIC);
-
-               if (unlikely(new_packet == NULL)) {
-                       DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
-                                 "send refused\n", dccp_role(sk), sk);
-                       return -ENOBUFS;
-               }
-
-               dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
-       }
-
        dccp_timestamp(sk, &now);
 
        switch (hctx->ccid3hctx_state) {
        case TFRC_SSTATE_NO_SENT:
                sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
-                              jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
+                              (jiffies +
+                               usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
                hctx->ccid3hctx_last_win_count   = 0;
                hctx->ccid3hctx_t_last_win_count = now;
                ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
 
-               /* Set initial sending rate to 1 packet per second */
+               /* Set initial sending rate X/s to 1pps (X is scaled by 2^6) */
                ccid3_hc_tx_update_s(hctx, skb->len);
-               hctx->ccid3hctx_x     = hctx->ccid3hctx_s;
+               hctx->ccid3hctx_x = hctx->ccid3hctx_s;
+               hctx->ccid3hctx_x <<= 6;
 
                /* First timeout, according to [RFC 3448, 4.2], is 1 second */
                hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
@@ -332,77 +341,57 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
        case TFRC_SSTATE_FBACK:
                delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
                /*
-                *      Scheduling of packet transmissions [RFC 3448, 4.6]
+                *      Scheduling of packet transmissions [RFC 3448, 4.6]
                 *
                 * if (t_now > t_nom - delta)
                 *       // send the packet now
                 * else
                 *       // send the packet in (t_nom - t_now) milliseconds.
                 */
-               if (delay - (long)hctx->ccid3hctx_delta >= 0)
+               if (delay - (suseconds_t)hctx->ccid3hctx_delta >= 0)
                        return delay / 1000L;
+
+               ccid3_hc_tx_update_win_count(hctx, &now);
                break;
        case TFRC_SSTATE_TERM:
-               DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
+               DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
                return -EINVAL;
        }
 
        /* prepare to send now (add options etc.) */
        dp->dccps_hc_tx_insert_options = 1;
-       new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval =
-                                   hctx->ccid3hctx_last_win_count;
+       DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
+
+       /* set the nominal send time for the next following packet */
        timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
 
        return 0;
 }
 
-static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
+static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
+                                   unsigned int len)
 {
-       const struct dccp_sock *dp = dccp_sk(sk);
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        struct timeval now;
-       unsigned long quarter_rtt;
        struct dccp_tx_hist_entry *packet;
 
        BUG_ON(hctx == NULL);
 
-       dccp_timestamp(sk, &now);
-
        ccid3_hc_tx_update_s(hctx, len);
 
-       packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
+       packet = dccp_tx_hist_entry_new(ccid3_tx_hist, GFP_ATOMIC);
        if (unlikely(packet == NULL)) {
-               DCCP_WARN("packet doesn't exist in history!\n");
-               return;
-       }
-       if (unlikely(packet->dccphtx_sent)) {
-               DCCP_WARN("no unsent packet in history!\n");
+               DCCP_CRIT("packet history - out of memory!");
                return;
        }
-       packet->dccphtx_tstamp = now;
-       packet->dccphtx_seqno  = dp->dccps_gss;
-       /*
-        * Check if win_count have changed
-        * Algorithm in "8.1. Window Counter Value" in RFC 4342.
-        */
-       quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
-       if (likely(hctx->ccid3hctx_rtt > 8))
-               quarter_rtt /= hctx->ccid3hctx_rtt / 4;
-
-       if (quarter_rtt > 0) {
-               hctx->ccid3hctx_t_last_win_count = now;
-               hctx->ccid3hctx_last_win_count   = (hctx->ccid3hctx_last_win_count +
-                                                   min_t(unsigned long, quarter_rtt, 5)) % 16;
-               ccid3_pr_debug("%s, sk=%p, window changed from "
-                              "%u to %u!\n",
-                              dccp_role(sk), sk,
-                              packet->dccphtx_ccval,
-                              hctx->ccid3hctx_last_win_count);
-       }
+       dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, packet);
 
-       hctx->ccid3hctx_idle = 0;
-       packet->dccphtx_rtt  = hctx->ccid3hctx_rtt;
-       packet->dccphtx_sent = 1;
+       dccp_timestamp(sk, &now);
+       packet->dccphtx_tstamp = now;
+       packet->dccphtx_seqno  = dccp_sk(sk)->dccps_gss;
+       packet->dccphtx_rtt    = hctx->ccid3hctx_rtt;
+       packet->dccphtx_sent   = 1;
+       hctx->ccid3hctx_idle   = 0;
 }
 
 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -414,7 +403,7 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
        struct timeval now;
        unsigned long t_nfb;
        u32 pinv;
-       long r_sample, t_elapsed;
+       suseconds_t r_sample, t_elapsed;
 
        BUG_ON(hctx == NULL);
 
@@ -430,44 +419,44 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
        case TFRC_SSTATE_FBACK:
                /* get packet from history to look up t_recvdata */
                packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
-                                                DCCP_SKB_CB(skb)->dccpd_ack_seq);
+                                             DCCP_SKB_CB(skb)->dccpd_ack_seq);
                if (unlikely(packet == NULL)) {
                        DCCP_WARN("%s(%p), seqno %llu(%s) doesn't exist "
                                  "in history!\n",  dccp_role(sk), sk,
                            (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
-                                 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
+                               dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
                        return;
                }
 
-               /* Update receive rate */
+               /* Update receive rate in units of 64 * bytes/second */
                hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate;
+               hctx->ccid3hctx_x_recv <<= 6;
 
                /* Update loss event rate */
                pinv = opt_recv->ccid3or_loss_event_rate;
-               if (pinv == ~0U || pinv == 0)
+               if (pinv == ~0U || pinv == 0)          /* see RFC 4342, 8.5   */
                        hctx->ccid3hctx_p = 0;
-               else
-                       hctx->ccid3hctx_p = 1000000 / pinv;
+               else                                   /* can not exceed 100% */
+                       hctx->ccid3hctx_p = 1000000 / pinv;
 
                dccp_timestamp(sk, &now);
 
                /*
                 * Calculate new round trip sample as per [RFC 3448, 4.3] by
-                *      R_sample  =  (now - t_recvdata) - t_elapsed
+                *      R_sample  =  (now - t_recvdata) - t_elapsed
                 */
                r_sample  = timeval_delta(&now, &packet->dccphtx_tstamp);
                t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
 
-               if (unlikely(r_sample <= 0)) {
-                       DCCP_WARN("WARNING: R_sample (%ld) <= 0!\n", r_sample);
-                       r_sample = 0;
-               } else if (unlikely(r_sample <= t_elapsed))
-                       DCCP_WARN("WARNING: r_sample=%ldus <= t_elapsed=%ldus\n",
-                                 r_sample, t_elapsed);
+               DCCP_BUG_ON(r_sample < 0);
+               if (unlikely(r_sample <= t_elapsed))
+                       DCCP_WARN("WARNING: r_sample=%dus <= t_elapsed=%dus\n",
+                                 (int)r_sample, (int)t_elapsed);
                else
                        r_sample -= t_elapsed;
+               CCID3_RTT_SANITY_CHECK(r_sample);
 
-               /* Update RTT estimate by 
+               /* Update RTT estimate by
                 * If (No feedback recv)
                 *    R = R_sample;
                 * Else
@@ -476,34 +465,45 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                 * q is a constant, RFC 3448 recommends 0.9
                 */
                if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
-                       /* Use Larger Initial Windows [RFC 4342, sec. 5]
-                        * We deviate in that we use `s' instead of `MSS'. */
-                       u16 w_init = max(    4 * hctx->ccid3hctx_s,
-                                        max(2 * hctx->ccid3hctx_s, 4380));
+                       /*
+                        * Larger Initial Windows [RFC 4342, sec. 5]
+                        * We deviate in that we use `s' instead of `MSS'.
+                        */
+                       __u64 w_init = min(4 * hctx->ccid3hctx_s,
+                                          max(2 * hctx->ccid3hctx_s, 4380));
                        hctx->ccid3hctx_rtt  = r_sample;
-                       hctx->ccid3hctx_x    = usecs_div(w_init, r_sample);
+                       hctx->ccid3hctx_x    = scaled_div(w_init << 6, r_sample);
                        hctx->ccid3hctx_t_ld = now;
 
                        ccid3_update_send_time(hctx);
 
-                       ccid3_pr_debug("%s(%p), s=%u, w_init=%u, "
-                                      "R_sample=%ldus, X=%u\n", dccp_role(sk),
-                                      sk, hctx->ccid3hctx_s, w_init, r_sample,
-                                      hctx->ccid3hctx_x);
+                       ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
+                                      "R_sample=%dus, X=%u\n", dccp_role(sk),
+                                      sk, hctx->ccid3hctx_s, w_init,
+                                      (int)r_sample,
+                                      (unsigned)(hctx->ccid3hctx_x >> 6));
 
                        ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
                } else {
                        hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
-                                                  (u32)r_sample        ) / 10;
-
+                                                  (u32)r_sample) / 10;
+
+                       /* Update sending rate (step 4 of [RFC 3448, 4.3]) */
+                       if (hctx->ccid3hctx_p > 0)
+                               hctx->ccid3hctx_x_calc =
+                                       tfrc_calc_x(hctx->ccid3hctx_s,
+                                                   hctx->ccid3hctx_rtt,
+                                                   hctx->ccid3hctx_p);
                        ccid3_hc_tx_update_x(sk, &now);
 
-                       ccid3_pr_debug("%s(%p), RTT=%uus (sample=%ldus), s=%u, "
-                                      "p=%u, X_calc=%u, X=%u\n", dccp_role(sk),
-                                      sk, hctx->ccid3hctx_rtt, r_sample,
+                       ccid3_pr_debug("%s(%p), RTT=%uus (sample=%dus), s=%u, "
+                                      "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
+                                      dccp_role(sk),
+                                      sk, hctx->ccid3hctx_rtt, (int)r_sample,
                                       hctx->ccid3hctx_s, hctx->ccid3hctx_p,
                                       hctx->ccid3hctx_x_calc,
-                                      hctx->ccid3hctx_x);
+                                      (unsigned)(hctx->ccid3hctx_x_recv >> 6),
+                                      (unsigned)(hctx->ccid3hctx_x >> 6));
                }
 
                /* unschedule no feedback timer */
@@ -513,57 +513,48 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                dccp_tx_hist_purge_older(ccid3_tx_hist,
                                         &hctx->ccid3hctx_hist, packet);
                /*
-                * As we have calculated new ipi, delta, t_nom it is possible that
-                * we now can send a packet, so wake up dccp_wait_for_ccid
+                * As we have calculated new ipi, delta, t_nom it is possible
+                * that we now can send a packet, so wake up dccp_wait_for_ccid
                 */
                sk->sk_write_space(sk);
 
                /*
                 * Update timeout interval for the nofeedback timer.
                 * We use a configuration option to increase the lower bound.
-                * This can help avoid triggering the nofeedback timer too often
-                * ('spinning') on LANs with small RTTs.
+                * This can help avoid triggering the nofeedback timer too
+                * often ('spinning') on LANs with small RTTs.
                 */
                hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
                                                   CONFIG_IP_DCCP_CCID3_RTO *
-                                                  (USEC_PER_SEC/1000)       );
+                                                  (USEC_PER_SEC/1000));
                /*
                 * Schedule no feedback timer to expire in
                 * max(t_RTO, 2 * s/X)  =  max(t_RTO, 2 * t_ipi)
                 */
                t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
-                       
-               ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
+
+               ccid3_pr_debug("%s(%p), Scheduled no feedback timer to "
                               "expire in %lu jiffies (%luus)\n",
-                              dccp_role(sk), sk,
-                              usecs_to_jiffies(t_nfb), t_nfb);
+                              dccp_role(sk),
+                              sk, usecs_to_jiffies(t_nfb), t_nfb);
 
-               sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 
+               sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
                                   jiffies + usecs_to_jiffies(t_nfb));
 
                /* set idle flag */
-               hctx->ccid3hctx_idle = 1;   
+               hctx->ccid3hctx_idle = 1;
                break;
        case TFRC_SSTATE_NO_SENT:
-               if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT)
-                       DCCP_WARN("Illegal ACK received - no packet sent\n");
+               /*
+                * XXX when implementing bidirectional rx/tx check this again
+                */
+               DCCP_WARN("Illegal ACK received - no packet sent\n");
                /* fall through */
        case TFRC_SSTATE_TERM:          /* ignore feedback when closing */
                break;
        }
 }
 
-static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
-{
-       const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
-
-       BUG_ON(hctx == NULL);
-
-       if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
-               DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
-       return 0;
-}
-
 static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
                                     unsigned char len, u16 idx,
                                     unsigned char *value)
@@ -588,13 +579,14 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
        switch (option) {
        case TFRC_OPT_LOSS_EVENT_RATE:
                if (unlikely(len != 4)) {
-                       DCCP_WARN("%s, sk=%p, invalid len %d "
+                       DCCP_WARN("%s(%p), invalid len %d "
                                  "for TFRC_OPT_LOSS_EVENT_RATE\n",
                                  dccp_role(sk), sk, len);
                        rc = -EINVAL;
                } else {
-                       opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
-                       ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
+                       opt_recv->ccid3or_loss_event_rate =
+                                               ntohl(*(__be32 *)value);
+                       ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
                                       dccp_role(sk), sk,
                                       opt_recv->ccid3or_loss_event_rate);
                }
@@ -602,20 +594,21 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
        case TFRC_OPT_LOSS_INTERVALS:
                opt_recv->ccid3or_loss_intervals_idx = idx;
                opt_recv->ccid3or_loss_intervals_len = len;
-               ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
+               ccid3_pr_debug("%s(%p), LOSS_INTERVALS=(%u, %u)\n",
                               dccp_role(sk), sk,
                               opt_recv->ccid3or_loss_intervals_idx,
                               opt_recv->ccid3or_loss_intervals_len);
                break;
        case TFRC_OPT_RECEIVE_RATE:
                if (unlikely(len != 4)) {
-                       DCCP_WARN("%s, sk=%p, invalid len %d "
+                       DCCP_WARN("%s(%p), invalid len %d "
                                  "for TFRC_OPT_RECEIVE_RATE\n",
                                  dccp_role(sk), sk, len);
                        rc = -EINVAL;
                } else {
-                       opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
-                       ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
+                       opt_recv->ccid3or_receive_rate =
+                                               ntohl(*(__be32 *)value);
+                       ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
                                       dccp_role(sk), sk,
                                       opt_recv->ccid3or_receive_rate);
                }
@@ -630,10 +623,12 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
        struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
 
        hctx->ccid3hctx_s     = 0;
+       hctx->ccid3hctx_rtt   = 0;
        hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
        INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
 
-       hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
+       hctx->ccid3hctx_no_feedback_timer.function =
+                               ccid3_hc_tx_no_feedback_timer;
        hctx->ccid3hctx_no_feedback_timer.data     = (unsigned long)sk;
        init_timer(&hctx->ccid3hctx_no_feedback_timer);
 
@@ -698,8 +693,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_rx_hist_entry *packet;
        struct timeval now;
+       suseconds_t delta;
 
-       ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
+       ccid3_pr_debug("%s(%p) - entry\n", dccp_role(sk), sk);
 
        dccp_timestamp(sk, &now);
 
@@ -707,21 +703,21 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
        case TFRC_RSTATE_NO_DATA:
                hcrx->ccid3hcrx_x_recv = 0;
                break;
-       case TFRC_RSTATE_DATA: {
-               const u32 delta = timeval_delta(&now,
-                                       &hcrx->ccid3hcrx_tstamp_last_feedback);
-               hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv,
-                                                  delta);
-       }
+       case TFRC_RSTATE_DATA:
+               delta = timeval_delta(&now,
+                                     &hcrx->ccid3hcrx_tstamp_last_feedback);
+               DCCP_BUG_ON(delta < 0);
+               hcrx->ccid3hcrx_x_recv =
+                       scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
                break;
        case TFRC_RSTATE_TERM:
-               DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
+               DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
                return;
        }
 
        packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
        if (unlikely(packet == NULL)) {
-               DCCP_WARN("%s, sk=%p, no data packet in history!\n",
+               DCCP_WARN("%s(%p), no data packet in history!\n",
                          dccp_role(sk), sk);
                return;
        }
@@ -730,13 +726,19 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
        hcrx->ccid3hcrx_ccval_last_counter   = packet->dccphrx_ccval;
        hcrx->ccid3hcrx_bytes_recv           = 0;
 
-       /* Convert to multiples of 10us */
-       hcrx->ccid3hcrx_elapsed_time =
-                       timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
+       /* Elapsed time information [RFC 4340, 13.2] in units of 10 * usecs */
+       delta = timeval_delta(&now, &packet->dccphrx_tstamp);
+       DCCP_BUG_ON(delta < 0);
+       hcrx->ccid3hcrx_elapsed_time = delta / 10;
+
        if (hcrx->ccid3hcrx_p == 0)
-               hcrx->ccid3hcrx_pinv = ~0;
-       else
+               hcrx->ccid3hcrx_pinv = ~0U;     /* see RFC 4342, 8.5 */
+       else if (hcrx->ccid3hcrx_p > 1000000) {
+               DCCP_WARN("p (%u) > 100%%\n", hcrx->ccid3hcrx_p);
+               hcrx->ccid3hcrx_pinv = 1;       /* use 100% in this case */
+       } else
                hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
+
        dp->dccps_hc_rx_insert_options = 1;
        dccp_send_ack(sk);
 }
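
As a cross-check on the feedback block above: X_recv is the byte count since the last feedback, scaled by 10^6 and divided by the elapsed microseconds (i.e. a byte rate), and the loss event rate is transmitted as its inverse, with ~0 standing for "no loss observed yet" (RFC 4342, 8.5) and a clamp at 100%. A small standalone sketch of that arithmetic; all numbers are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t bytes_recv = 14600;     /* bytes since the last feedback   */
    int64_t  delta_us   = 200000;    /* elapsed time, usecs             */
    uint32_t p          = 20000;     /* loss event rate, scaled by 10^6 */

    /* X_recv in bytes/second, cf. scaled_div32(bytes_recv, delta) */
    uint32_t x_recv = (uint32_t)(bytes_recv * 1000000 / delta_us);

    /* inverse loss event rate as sent in TFRC_OPT_LOSS_EVENT_RATE */
    uint32_t pinv;
    if (p == 0)
        pinv = ~0U;             /* no loss observed yet */
    else if (p > 1000000)
        pinv = 1;               /* clamp to 100%        */
    else
        pinv = 1000000 / p;

    /* prints: X_recv=73000 bytes/s, 1/p=50 */
    printf("X_recv=%u bytes/s, 1/p=%u\n", (unsigned)x_recv, (unsigned)pinv);
    return 0;
}
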
@@ -764,9 +766,9 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
                                             hcrx->ccid3hcrx_elapsed_time)) ||
            dccp_insert_option_timestamp(sk, skb) ||
            dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
-                              &pinv, sizeof(pinv)) ||
+                              &pinv, sizeof(pinv)) ||
            dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
-                              &x_recv, sizeof(x_recv)))
+                              &x_recv, sizeof(x_recv)))
                return -1;
 
        return 0;
@@ -780,12 +782,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
 {
        struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
        struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
-       u32 rtt, delta, x_recv, fval, p, tmp2;
+       u32 x_recv, p;
+       suseconds_t rtt, delta;
        struct timeval tstamp = { 0, };
        int interval = 0;
        int win_count = 0;
        int step = 0;
-       u64 tmp1;
+       u64 fval;
 
        list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
                                 dccphrx_node) {
@@ -810,13 +813,13 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
        }
 
        if (unlikely(step == 0)) {
-               DCCP_WARN("%s, sk=%p, packet history has no data packets!\n",
+               DCCP_WARN("%s(%p), packet history has no data packets!\n",
                          dccp_role(sk), sk);
                return ~0;
        }
 
        if (unlikely(interval == 0)) {
-               DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0."
+                       DCCP_WARN("%s(%p), Could not find a win_count interval > 0. "
                          "Defaulting to 1\n", dccp_role(sk), sk);
                interval = 1;
        }
@@ -825,41 +828,51 @@ found:
                DCCP_CRIT("tail is null\n");
                return ~0;
        }
-       rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
-       ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
-                      dccp_role(sk), sk, rtt);
 
-       if (rtt == 0) {
-               DCCP_WARN("RTT==0, setting to 1\n");
-               rtt = 1;
+       delta = timeval_delta(&tstamp, &tail->dccphrx_tstamp);
+       DCCP_BUG_ON(delta < 0);
+
+       rtt = delta * 4 / interval;
+       ccid3_pr_debug("%s(%p), approximated RTT to %dus\n",
+                      dccp_role(sk), sk, (int)rtt);
+
+       /*
+        * Determine the length of the first loss interval via inverse lookup.
+        * Assume that X_recv can be computed by the throughput equation
+        *                  s
+        *      X_recv = --------
+        *               R * fval
+        * Find some p such that f(p) = fval; return 1/p [RFC 3448, 6.3.1].
+        */
+       if (rtt == 0) {                 /* would result in divide-by-zero */
+               DCCP_WARN("RTT==0, returning 1/p = 1\n");
+               return 1000000;
        }
 
        dccp_timestamp(sk, &tstamp);
        delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
-       x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
-
-       if (x_recv == 0)
-               x_recv = hcrx->ccid3hcrx_x_recv;
-
-       tmp1 = (u64)x_recv * (u64)rtt;
-       do_div(tmp1,10000000);
-       tmp2 = (u32)tmp1;
-
-       if (!tmp2) {
-               DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt);
-               return ~0;
+       DCCP_BUG_ON(delta <= 0);
+
+       x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta);
+       if (x_recv == 0) {              /* would also trigger divide-by-zero */
+               DCCP_WARN("X_recv==0\n");
+               if ((x_recv = hcrx->ccid3hcrx_x_recv) == 0) {
+                       DCCP_BUG("stored value of X_recv is zero");
+                       return 1000000;
+               }
        }
 
-       fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
-       /* do not alter order above or you will get overflow on 32 bit */
+       fval = scaled_div(hcrx->ccid3hcrx_s, rtt);
+       fval = scaled_div32(fval, x_recv);
        p = tfrc_calc_x_reverse_lookup(fval);
-       ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
+
+       ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied "
                       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
 
        if (p == 0)
                return ~0;
        else
-               return 1000000 / p; 
+               return 1000000 / p;
 }
 
 static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
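
The rewritten ccid3_hc_rx_calc_first_li() above inverts the throughput equation: with the RTT in microseconds and f(p) scaled by 10^6, fval = (s * 10^6 / R) * 10^6 / X_recv, and the function finally returns 10^6/p once the table lookup has produced p. A hedged standalone illustration of the two-stage scaling (the reverse lookup itself is left out; the values are invented):

#include <stdio.h>
#include <stdint.h>

/* a * 10^6 / b, mirroring the scaled_div() helper introduced in tfrc.h */
static uint64_t scaled_div(uint64_t a, uint32_t b)
{
    return a * 1000000 / b;
}

int main(void)
{
    uint16_t s      =   1460;   /* packet size, bytes             */
    uint32_t rtt    = 100000;   /* approximated RTT, usecs        */
    uint32_t x_recv =  36500;   /* measured receive rate, bytes/s */

    /*
     * X_recv = s / (R * f(p))  =>  f(p) = s / (R * X_recv).
     * With R and f both scaled by 10^6 this is evaluated in two
     * stages to keep the intermediate result inside 64 bits.
     */
    uint64_t fval = scaled_div(scaled_div(s, rtt), x_recv);

    /* p would now come from tfrc_calc_x_reverse_lookup(fval),
     * and the interval length returned would be 1000000 / p.  */
    printf("fval (scaled by 10^6) = %llu\n", (unsigned long long)fval);
    return 0;
}
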
@@ -913,7 +926,8 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
                                     struct dccp_rx_hist_entry *packet)
 {
        struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
-       struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
+       struct dccp_rx_hist_entry *rx_hist =
+                               dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
        u64 seqno = packet->dccphrx_seqno;
        u64 tmp_seqno;
        int loss = 0;
@@ -941,7 +955,7 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk,
                dccp_inc_seqno(&tmp_seqno);
                while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
                   tmp_seqno, &ccval)) {
-                       hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
+                       hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
                        hcrx->ccid3hcrx_ccval_nonloss = ccval;
                        dccp_inc_seqno(&tmp_seqno);
                }
@@ -967,7 +981,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
        const struct dccp_options_received *opt_recv;
        struct dccp_rx_hist_entry *packet;
        struct timeval now;
-       u32 p_prev, rtt_prev, r_sample, t_elapsed;
+       u32 p_prev, rtt_prev;
+       suseconds_t r_sample, t_elapsed;
        int loss, payload_size;
 
        BUG_ON(hcrx == NULL);
@@ -987,11 +1002,13 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
                r_sample = timeval_usecs(&now);
                t_elapsed = opt_recv->dccpor_elapsed_time * 10;
 
+               DCCP_BUG_ON(r_sample < 0);
                if (unlikely(r_sample <= t_elapsed))
-                       DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
+                       DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
                                  r_sample, t_elapsed);
                else
                        r_sample -= t_elapsed;
+               CCID3_RTT_SANITY_CHECK(r_sample);
 
                if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
                        hcrx->ccid3hcrx_rtt = r_sample;
@@ -1000,8 +1017,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
                                              r_sample / 10;
 
                if (rtt_prev != hcrx->ccid3hcrx_rtt)
-                       ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
-                                      dccp_role(sk), hcrx->ccid3hcrx_rtt,
+                       ccid3_pr_debug("%s(%p), New RTT=%uus, elapsed time=%u\n",
+                                      dccp_role(sk), sk, hcrx->ccid3hcrx_rtt,
                                       opt_recv->dccpor_elapsed_time);
                break;
        case DCCP_PKT_DATA:
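
For the RTT sampling in the two hunks above: the raw sample is the elapsed time since the echoed timestamp in microseconds, from which the receiver-reported hold time (the Elapsed Time option, carried in units of 10 usecs) is subtracted before the sanity clamp; the smoothing into ccid3hcrx_rtt appears to use a 9:1 weighting, assumed here from the visible "r_sample / 10" tail. An illustrative userspace sketch with invented numbers:

#include <stdio.h>

int main(void)
{
    long r_sample = 500000;     /* raw sample: time since the echoed
                                   timestamp, usecs (invented)        */
    long elapsed  =  12000;     /* Elapsed Time option, 10-usec units */
    long rtt      =  40000;     /* current estimate, usecs            */

    /* subtract the receiver's reported hold time from the raw sample */
    long t_elapsed = elapsed * 10;
    if (r_sample > t_elapsed)
        r_sample -= t_elapsed;

    /* smooth with a 9:1 weighting (assumed from the surrounding code) */
    rtt = rtt * 9 / 10 + r_sample / 10;

    printf("r_sample=%ldus, rtt=%ldus\n", r_sample, rtt);  /* 380000, 74000 */
    return 0;
}
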
@@ -1013,7 +1030,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
        packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
                                        skb, GFP_ATOMIC);
        if (unlikely(packet == NULL)) {
-               DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
+               DCCP_WARN("%s(%p), Not enough mem to add rx packet "
                          "to history, consider it lost!\n", dccp_role(sk), sk);
                return;
        }
@@ -1028,9 +1045,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 
        switch (hcrx->ccid3hcrx_state) {
        case TFRC_RSTATE_NO_DATA:
-               ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
-                              "feedback\n",
-                              dccp_role(sk), sk,
+               ccid3_pr_debug("%s(%p, state=%s), skb=%p, sending initial "
+                              "feedback\n", dccp_role(sk), sk,
                               dccp_state_name(sk->sk_state), skb);
                ccid3_hc_rx_send_feedback(sk);
                ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
@@ -1041,19 +1057,19 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
                        break;
 
                dccp_timestamp(sk, &now);
-               if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
-                   hcrx->ccid3hcrx_rtt) {
+               if ((timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) -
+                    (suseconds_t)hcrx->ccid3hcrx_rtt) >= 0) {
                        hcrx->ccid3hcrx_tstamp_last_ack = now;
                        ccid3_hc_rx_send_feedback(sk);
                }
                return;
        case TFRC_RSTATE_TERM:
-               DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
+               DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
                return;
        }
 
        /* Dealing with packet loss */
-       ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
+       ccid3_pr_debug("%s(%p, state=%s), data loss! Reacting...\n",
                       dccp_role(sk), sk, dccp_state_name(sk->sk_state));
 
        p_prev = hcrx->ccid3hcrx_p;
@@ -1078,7 +1094,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
 {
        struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
 
-       ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
+       ccid3_pr_debug("entry\n");
 
        hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
        INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
@@ -1086,7 +1102,7 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
        dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
        hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
        hcrx->ccid3hcrx_s   = 0;
-       hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */
+       hcrx->ccid3hcrx_rtt = 0;
        return 0;
 }
 
@@ -1115,9 +1131,9 @@ static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
 
        BUG_ON(hcrx == NULL);
 
-       info->tcpi_ca_state     = hcrx->ccid3hcrx_state;
-       info->tcpi_options      |= TCPI_OPT_TIMESTAMPS;
-       info->tcpi_rcv_rtt      = hcrx->ccid3hcrx_rtt;
+       info->tcpi_ca_state = hcrx->ccid3hcrx_state;
+       info->tcpi_options  |= TCPI_OPT_TIMESTAMPS;
+       info->tcpi_rcv_rtt  = hcrx->ccid3hcrx_rtt;
 }
 
 static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
@@ -1198,7 +1214,6 @@ static struct ccid_operations ccid3 = {
        .ccid_hc_tx_send_packet    = ccid3_hc_tx_send_packet,
        .ccid_hc_tx_packet_sent    = ccid3_hc_tx_packet_sent,
        .ccid_hc_tx_packet_recv    = ccid3_hc_tx_packet_recv,
-       .ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
        .ccid_hc_tx_parse_options  = ccid3_hc_tx_parse_options,
        .ccid_hc_rx_obj_size       = sizeof(struct ccid3_hc_rx_sock),
        .ccid_hc_rx_init           = ccid3_hc_rx_init,
@@ -1210,7 +1225,7 @@ static struct ccid_operations ccid3 = {
        .ccid_hc_rx_getsockopt     = ccid3_hc_rx_getsockopt,
        .ccid_hc_tx_getsockopt     = ccid3_hc_tx_getsockopt,
 };
+
 #ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 module_param(ccid3_debug, int, 0444);
 MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
@@ -1233,7 +1248,7 @@ static __init int ccid3_module_init(void)
                goto out_free_tx;
 
        rc = ccid_register(&ccid3);
-       if (rc != 0) 
+       if (rc != 0)
                goto out_free_loss_interval_history;
 out:
        return rc;
index 07596d704ef9a67b802885ae739152dc6906e7ea..15776a88c0901c82c876026c42a0eb8b1af0826c 100644 (file)
 /* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
 #define TFRC_T_MBI                64
 
+/* What we think is a reasonable upper limit on RTT values */
+#define CCID3_SANE_RTT_MAX        ((suseconds_t)(4 * USEC_PER_SEC))
+
+#define CCID3_RTT_SANITY_CHECK(rtt)                    do {               \
+               if (rtt > CCID3_SANE_RTT_MAX) {                            \
+                       DCCP_CRIT("RTT (%d) too large, substituting %d",   \
+                                 (int)rtt, (int)CCID3_SANE_RTT_MAX);      \
+                       rtt = CCID3_SANE_RTT_MAX;                          \
+               }                                       } while (0)
+
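
The sanity-check macro above simply clamps implausible samples to a 4-second ceiling before they reach the RTT estimator. A rough userspace equivalent (names and the test value are illustrative):

#include <stdio.h>

#define SANE_RTT_MAX (4L * 1000000L)    /* 4 seconds in usecs */

static long clamp_rtt(long rtt)
{
    if (rtt > SANE_RTT_MAX) {
        fprintf(stderr, "RTT (%ld) too large, substituting %ld\n",
                rtt, SANE_RTT_MAX);
        rtt = SANE_RTT_MAX;
    }
    return rtt;
}

int main(void)
{
    printf("%ld\n", clamp_rtt(12L * 1000000L));     /* prints 4000000 */
    return 0;
}
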
 enum ccid3_options {
        TFRC_OPT_LOSS_EVENT_RATE = 192,
        TFRC_OPT_LOSS_INTERVALS  = 193,
@@ -67,7 +77,7 @@ struct ccid3_options_received {
 
 /* TFRC sender states */
 enum ccid3_hc_tx_states {
-               TFRC_SSTATE_NO_SENT = 1,
+       TFRC_SSTATE_NO_SENT = 1,
        TFRC_SSTATE_NO_FBACK,
        TFRC_SSTATE_FBACK,
        TFRC_SSTATE_TERM,
@@ -75,23 +85,23 @@ enum ccid3_hc_tx_states {
 
 /** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
  *
- * @ccid3hctx_x - Current sending rate
- * @ccid3hctx_x_recv - Receive rate
- * @ccid3hctx_x_calc - Calculated send rate (RFC 3448, 3.1)
+ * @ccid3hctx_x - Current sending rate in 64 * bytes per second
+ * @ccid3hctx_x_recv - Receive rate    in 64 * bytes per second
+ * @ccid3hctx_x_calc - Calculated rate in bytes per second
  * @ccid3hctx_rtt - Estimate of current round trip time in usecs
  * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000
- * @ccid3hctx_s - Packet size
- * @ccid3hctx_t_rto - Retransmission Timeout (RFC 3448, 3.1)
- * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6)
+ * @ccid3hctx_s - Packet size in bytes
+ * @ccid3hctx_t_rto - Nofeedback Timer setting in usecs
+ * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) in usecs
  * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states
  * @ccid3hctx_last_win_count - Last window counter sent
  * @ccid3hctx_t_last_win_count - Timestamp of earliest packet
- *                              with last_win_count value sent
+ *                              with last_win_count value sent
  * @ccid3hctx_no_feedback_timer - Handle to no feedback timer
  * @ccid3hctx_idle - Flag indicating that sender is idling
  * @ccid3hctx_t_ld - Time last doubled during slow start
  * @ccid3hctx_t_nom - Nominal send time of next packet
- * @ccid3hctx_delta - Send timer delta
+ * @ccid3hctx_delta - Send timer delta (RFC 3448, 4.6) in usecs
  * @ccid3hctx_hist - Packet history
  * @ccid3hctx_options_received - Parsed set of retrieved options
  */
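
A note on the units documented above: the sending and receive rates appear to be kept scaled by 64 (2^6), i.e. the stored value is presumably the byte rate shifted left by 6 so that fractional precision survives the rate calculations; the plain rate in bytes/second would then be recovered with a right shift by 6.
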
@@ -105,7 +115,7 @@ struct ccid3_hc_tx_sock {
 #define ccid3hctx_t_rto                        ccid3hctx_tfrc.tfrctx_rto
 #define ccid3hctx_t_ipi                        ccid3hctx_tfrc.tfrctx_ipi
        u16                             ccid3hctx_s;
-       enum ccid3_hc_tx_states         ccid3hctx_state:8;
+       enum ccid3_hc_tx_states         ccid3hctx_state:8;
        u8                              ccid3hctx_last_win_count;
        u8                              ccid3hctx_idle;
        struct timeval                  ccid3hctx_t_last_win_count;
@@ -119,7 +129,7 @@ struct ccid3_hc_tx_sock {
 
 /* TFRC receiver states */
 enum ccid3_hc_rx_states {
-               TFRC_RSTATE_NO_DATA = 1,
+       TFRC_RSTATE_NO_DATA = 1,
        TFRC_RSTATE_DATA,
        TFRC_RSTATE_TERM    = 127,
 };
@@ -147,18 +157,18 @@ struct ccid3_hc_rx_sock {
 #define ccid3hcrx_x_recv               ccid3hcrx_tfrc.tfrcrx_x_recv
 #define ccid3hcrx_rtt                  ccid3hcrx_tfrc.tfrcrx_rtt
 #define ccid3hcrx_p                    ccid3hcrx_tfrc.tfrcrx_p
-       u64                             ccid3hcrx_seqno_nonloss:48,
+       u64                             ccid3hcrx_seqno_nonloss:48,
                                        ccid3hcrx_ccval_nonloss:4,
                                        ccid3hcrx_ccval_last_counter:4;
        enum ccid3_hc_rx_states         ccid3hcrx_state:8;
-       u32                             ccid3hcrx_bytes_recv;
-       struct timeval                  ccid3hcrx_tstamp_last_feedback;
-       struct timeval                  ccid3hcrx_tstamp_last_ack;
+       u32                             ccid3hcrx_bytes_recv;
+       struct timeval                  ccid3hcrx_tstamp_last_feedback;
+       struct timeval                  ccid3hcrx_tstamp_last_ack;
        struct list_head                ccid3hcrx_hist;
        struct list_head                ccid3hcrx_li_hist;
-       u16                             ccid3hcrx_s;
-       u32                             ccid3hcrx_pinv;
-       u32                             ccid3hcrx_elapsed_time;
+       u16                             ccid3hcrx_s;
+       u32                             ccid3hcrx_pinv;
+       u32                             ccid3hcrx_elapsed_time;
 };
 
 static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
index b876c9c81c652f5a45b28b98998d378a0a0e7a6d..2e8ef42721e264e5f06149c21d7d6aa631d15552 100644 (file)
 
 #include <linux/module.h>
 #include <linux/string.h>
-
 #include "packet_history.h"
 
+/*
+ *     Transmitter History Routines
+ */
+struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
+{
+       struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
+       static const char dccp_tx_hist_mask[] = "tx_hist_%s";
+       char *slab_name;
+
+       if (hist == NULL)
+               goto out;
+
+       slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
+                           GFP_ATOMIC);
+       if (slab_name == NULL)
+               goto out_free_hist;
+
+       sprintf(slab_name, dccp_tx_hist_mask, name);
+       hist->dccptxh_slab = kmem_cache_create(slab_name,
+                                            sizeof(struct dccp_tx_hist_entry),
+                                              0, SLAB_HWCACHE_ALIGN,
+                                              NULL, NULL);
+       if (hist->dccptxh_slab == NULL)
+               goto out_free_slab_name;
+out:
+       return hist;
+out_free_slab_name:
+       kfree(slab_name);
+out_free_hist:
+       kfree(hist);
+       hist = NULL;
+       goto out;
+}
+
+EXPORT_SYMBOL_GPL(dccp_tx_hist_new);
+
+void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
+{
+       const char* name = kmem_cache_name(hist->dccptxh_slab);
+
+       kmem_cache_destroy(hist->dccptxh_slab);
+       kfree(name);
+       kfree(hist);
+}
+
+EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);
+
+struct dccp_tx_hist_entry *
+       dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
+{
+       struct dccp_tx_hist_entry *packet = NULL, *entry;
+
+       list_for_each_entry(entry, list, dccphtx_node)
+               if (entry->dccphtx_seqno == seq) {
+                       packet = entry;
+                       break;
+               }
+
+       return packet;
+}
+
+EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
+
+void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
+{
+       struct dccp_tx_hist_entry *entry, *next;
+
+       list_for_each_entry_safe(entry, next, list, dccphtx_node) {
+               list_del_init(&entry->dccphtx_node);
+               dccp_tx_hist_entry_delete(hist, entry);
+       }
+}
+
+EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
+
+void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
+                             struct list_head *list,
+                             struct dccp_tx_hist_entry *packet)
+{
+       struct dccp_tx_hist_entry *next;
+
+       list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) {
+               list_del_init(&packet->dccphtx_node);
+               dccp_tx_hist_entry_delete(hist, packet);
+       }
+}
+
+EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older);
+
+/*
+ *     Receiver History Routines
+ */
 struct dccp_rx_hist *dccp_rx_hist_new(const char *name)
 {
        struct dccp_rx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
@@ -83,18 +174,24 @@ void dccp_rx_hist_delete(struct dccp_rx_hist *hist)
 
 EXPORT_SYMBOL_GPL(dccp_rx_hist_delete);
 
-void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list)
+int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
+                           u8 *ccval)
 {
-       struct dccp_rx_hist_entry *entry, *next;
+       struct dccp_rx_hist_entry *packet = NULL, *entry;
 
-       list_for_each_entry_safe(entry, next, list, dccphrx_node) {
-               list_del_init(&entry->dccphrx_node);
-               kmem_cache_free(hist->dccprxh_slab, entry);
-       }
-}
+       list_for_each_entry(entry, list, dccphrx_node)
+               if (entry->dccphrx_seqno == seq) {
+                       packet = entry;
+                       break;
+               }
 
-EXPORT_SYMBOL_GPL(dccp_rx_hist_purge);
+       if (packet)
+               *ccval = packet->dccphrx_ccval;
 
+       return packet != NULL;
+}
+
+EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
 struct dccp_rx_hist_entry *
                dccp_rx_hist_find_data_packet(const struct list_head *list)
 {
@@ -184,110 +281,18 @@ void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
 
 EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
 
-struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
-{
-       struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
-       static const char dccp_tx_hist_mask[] = "tx_hist_%s";
-       char *slab_name;
-
-       if (hist == NULL)
-               goto out;
-
-       slab_name = kmalloc(strlen(name) + sizeof(dccp_tx_hist_mask) - 1,
-                           GFP_ATOMIC);
-       if (slab_name == NULL)
-               goto out_free_hist;
-
-       sprintf(slab_name, dccp_tx_hist_mask, name);
-       hist->dccptxh_slab = kmem_cache_create(slab_name,
-                                            sizeof(struct dccp_tx_hist_entry),
-                                              0, SLAB_HWCACHE_ALIGN,
-                                              NULL, NULL);
-       if (hist->dccptxh_slab == NULL)
-               goto out_free_slab_name;
-out:
-       return hist;
-out_free_slab_name:
-       kfree(slab_name);
-out_free_hist:
-       kfree(hist);
-       hist = NULL;
-       goto out;
-}
-
-EXPORT_SYMBOL_GPL(dccp_tx_hist_new);
-
-void dccp_tx_hist_delete(struct dccp_tx_hist *hist)
-{
-       const char* name = kmem_cache_name(hist->dccptxh_slab);
-
-       kmem_cache_destroy(hist->dccptxh_slab);
-       kfree(name);
-       kfree(hist);
-}
-
-EXPORT_SYMBOL_GPL(dccp_tx_hist_delete);
-
-struct dccp_tx_hist_entry *
-       dccp_tx_hist_find_entry(const struct list_head *list, const u64 seq)
-{
-       struct dccp_tx_hist_entry *packet = NULL, *entry;
-
-       list_for_each_entry(entry, list, dccphtx_node)
-               if (entry->dccphtx_seqno == seq) {
-                       packet = entry;
-                       break;
-               }
-
-       return packet;
-}
-
-EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
-
-int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
-   u8 *ccval)
-{
-       struct dccp_rx_hist_entry *packet = NULL, *entry;
-
-       list_for_each_entry(entry, list, dccphrx_node)
-               if (entry->dccphrx_seqno == seq) {
-                       packet = entry;
-                       break;
-               }
-
-       if (packet)
-               *ccval = packet->dccphrx_ccval;
-
-       return packet != NULL;
-}
-
-EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
-
-void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
-                             struct list_head *list,
-                             struct dccp_tx_hist_entry *packet)
+void dccp_rx_hist_purge(struct dccp_rx_hist *hist, struct list_head *list)
 {
-       struct dccp_tx_hist_entry *next;
+       struct dccp_rx_hist_entry *entry, *next;
 
-       list_for_each_entry_safe_continue(packet, next, list, dccphtx_node) {
-               list_del_init(&packet->dccphtx_node);
-               dccp_tx_hist_entry_delete(hist, packet);
+       list_for_each_entry_safe(entry, next, list, dccphrx_node) {
+               list_del_init(&entry->dccphrx_node);
+               kmem_cache_free(hist->dccprxh_slab, entry);
        }
 }
 
-EXPORT_SYMBOL_GPL(dccp_tx_hist_purge_older);
-
-void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
-{
-       struct dccp_tx_hist_entry *entry, *next;
-
-       list_for_each_entry_safe(entry, next, list, dccphtx_node) {
-               list_del_init(&entry->dccphtx_node);
-               dccp_tx_hist_entry_delete(hist, entry);
-       }
-}
+EXPORT_SYMBOL_GPL(dccp_rx_hist_purge);
 
-EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
 
 MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
              "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
index 9a8bcf224aa73151a3775d2426524c38e1d169dd..1f960c19ea1bf0ca948403321efd30e09fb98de8 100644 (file)
 #define TFRC_WIN_COUNT_PER_RTT  4
 #define TFRC_WIN_COUNT_LIMIT   16
 
+/*
+ *     Transmitter History data structures and declarations
+ */
 struct dccp_tx_hist_entry {
        struct list_head dccphtx_node;
        u64              dccphtx_seqno:48,
-                        dccphtx_ccval:4,
                         dccphtx_sent:1;
        u32              dccphtx_rtt;
        struct timeval   dccphtx_tstamp;
 };
 
-struct dccp_rx_hist_entry {
-       struct list_head dccphrx_node;
-       u64              dccphrx_seqno:48,
-                        dccphrx_ccval:4,
-                        dccphrx_type:4;
-       u32              dccphrx_ndp; /* In fact it is from 8 to 24 bits */
-       struct timeval   dccphrx_tstamp;
-};
-
 struct dccp_tx_hist {
        struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
-extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
-
-struct dccp_rx_hist {
-       struct kmem_cache *dccprxh_slab;
-};
-
-extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
-extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist);
-extern struct dccp_rx_hist_entry *
-               dccp_rx_hist_find_data_packet(const struct list_head *list);
+extern void                dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 static inline struct dccp_tx_hist_entry *
-               dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
-                                      const gfp_t prio)
+                       dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
+                                              const gfp_t prio)
 {
        struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab,
                                                            prio);
@@ -96,18 +80,20 @@ static inline struct dccp_tx_hist_entry *
        return entry;
 }
 
-static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
-                                            struct dccp_tx_hist_entry *entry)
+static inline struct dccp_tx_hist_entry *
+                       dccp_tx_hist_head(struct list_head *list)
 {
-       if (entry != NULL)
-               kmem_cache_free(hist->dccptxh_slab, entry);
+       struct dccp_tx_hist_entry *head = NULL;
+
+       if (!list_empty(list))
+               head = list_entry(list->next, struct dccp_tx_hist_entry,
+                                 dccphtx_node);
+       return head;
 }
 
 extern struct dccp_tx_hist_entry *
                        dccp_tx_hist_find_entry(const struct list_head *list,
                                                const u64 seq);
-extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
-   u8 *ccval);
 
 static inline void dccp_tx_hist_add_entry(struct list_head *list,
                                          struct dccp_tx_hist_entry *entry)
@@ -115,30 +101,45 @@ static inline void dccp_tx_hist_add_entry(struct list_head *list,
        list_add(&entry->dccphtx_node, list);
 }
 
+static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
+                                            struct dccp_tx_hist_entry *entry)
+{
+       if (entry != NULL)
+               kmem_cache_free(hist->dccptxh_slab, entry);
+}
+
+extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist,
+                              struct list_head *list);
+
 extern void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
                                     struct list_head *list,
                                     struct dccp_tx_hist_entry *next);
 
-extern void dccp_tx_hist_purge(struct dccp_tx_hist *hist,
-                              struct list_head *list);
+/*
+ *     Receiver History data structures and declarations
+ */
+struct dccp_rx_hist_entry {
+       struct list_head dccphrx_node;
+       u64              dccphrx_seqno:48,
+                        dccphrx_ccval:4,
+                        dccphrx_type:4;
+       u32              dccphrx_ndp; /* In fact it is from 8 to 24 bits */
+       struct timeval   dccphrx_tstamp;
+};
 
-static inline struct dccp_tx_hist_entry *
-               dccp_tx_hist_head(struct list_head *list)
-{
-       struct dccp_tx_hist_entry *head = NULL;
+struct dccp_rx_hist {
+       struct kmem_cache *dccprxh_slab;
+};
 
-       if (!list_empty(list))
-               head = list_entry(list->next, struct dccp_tx_hist_entry,
-                                 dccphtx_node);
-       return head;
-}
+extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
+extern void            dccp_rx_hist_delete(struct dccp_rx_hist *hist);
 
 static inline struct dccp_rx_hist_entry *
-                    dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
-                                           const struct sock *sk, 
-                                           const u32 ndp, 
-                                           const struct sk_buff *skb,
-                                           const gfp_t prio)
+                       dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
+                                              const struct sock *sk,
+                                              const u32 ndp,
+                                              const struct sk_buff *skb,
+                                              const gfp_t prio)
 {
        struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab,
                                                            prio);
@@ -156,18 +157,8 @@ static inline struct dccp_rx_hist_entry *
        return entry;
 }
 
-static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
-                                            struct dccp_rx_hist_entry *entry)
-{
-       if (entry != NULL)
-               kmem_cache_free(hist->dccprxh_slab, entry);
-}
-
-extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
-                              struct list_head *list);
-
 static inline struct dccp_rx_hist_entry *
-               dccp_rx_hist_head(struct list_head *list)
+                       dccp_rx_hist_head(struct list_head *list)
 {
        struct dccp_rx_hist_entry *head = NULL;
 
@@ -177,6 +168,27 @@ static inline struct dccp_rx_hist_entry *
        return head;
 }
 
+extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
+                                  u8 *ccval);
+extern struct dccp_rx_hist_entry *
+               dccp_rx_hist_find_data_packet(const struct list_head *list);
+
+extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
+                                   struct list_head *rx_list,
+                                   struct list_head *li_list,
+                                   struct dccp_rx_hist_entry *packet,
+                                   u64 nonloss_seqno);
+
+static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
+                                            struct dccp_rx_hist_entry *entry)
+{
+       if (entry != NULL)
+               kmem_cache_free(hist->dccprxh_slab, entry);
+}
+
+extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
+                              struct list_head *list);
+
 static inline int
        dccp_rx_hist_entry_data_packet(const struct dccp_rx_hist_entry *entry)
 {
@@ -184,12 +196,6 @@ static inline int
               entry->dccphrx_type == DCCP_PKT_DATAACK;
 }
 
-extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
-                                  struct list_head *rx_list,
-                                  struct list_head *li_list,
-                                  struct dccp_rx_hist_entry *packet,
-                                  u64 nonloss_seqno);
-
 extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
                                    struct list_head *li_list, u8 *win_loss);
 
index 45f30f59ea2ab134d741bf22ef1dc3330d5aee05..faf5f7e219e36a85ecdfdf182e81ba8e6b7ef4af 100644 (file)
  *  the Free Software Foundation; either version 2 of the License, or
  *  (at your option) any later version.
  */
-
 #include <linux/types.h>
+#include <asm/div64.h>
+
+/* integer-arithmetic divisions of type (a * 1000000)/b */
+static inline u64 scaled_div(u64 a, u32 b)
+{
+       BUG_ON(b == 0);
+       a *= 1000000;
+       do_div(a, b);
+       return a;
+}
+
+static inline u32 scaled_div32(u64 a, u32 b)
+{
+       u64 result = scaled_div(a, b);
+
+       if (result > UINT_MAX) {
+               DCCP_CRIT("Overflow: a(%llu)/b(%u) > ~0U",
+                         (unsigned long long)a, b);
+               return UINT_MAX;
+       }
+       return result;
+}
 
 extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
 extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
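
A quick userspace check of the saturating 32-bit variant above, assuming the same semantics (a * 10^6 / b, clamped to UINT_MAX instead of wrapping); the test values are arbitrary and the kernel's do_div()/DCCP_CRIT details are omitted:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static uint32_t scaled_div32(uint64_t a, uint32_t b)
{
    uint64_t result = a * 1000000 / b;          /* scaled_div(a, b) */

    return result > UINT_MAX ? UINT_MAX : (uint32_t)result;
}

int main(void)
{
    /* 1460 bytes over 250000 usecs -> 5840 bytes/s */
    printf("%u\n", (unsigned)scaled_div32(1460, 250000));

    /* an oversized quotient saturates at UINT_MAX instead of wrapping */
    printf("%u\n", (unsigned)scaled_div32(10000000000ULL, 2));
    return 0;
}
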
index ddac2c511e2f288014ad1650881586b514472eec..90009fd77e158f03e3e1a4002a23be34d0bd7e01 100644 (file)
@@ -13,7 +13,6 @@
  */
 
 #include <linux/module.h>
-#include <asm/div64.h>
 #include "../../dccp.h"
 #include "tfrc.h"
 
@@ -616,15 +615,12 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
  *  @R: RTT                  scaled by 1000000   (i.e., microseconds)
  *  @p: loss ratio estimate  scaled by 1000000
  *  Returns X_calc           in bytes per second (not scaled).
- *
- * Note: DO NOT alter this code unless you run test cases against it,
- *       as the code has been optimized to stop underflow/overflow.
  */
 u32 tfrc_calc_x(u16 s, u32 R, u32 p)
 {
-       int index;
+       u16 index;
        u32 f;
-       u64 tmp1, tmp2;
+       u64 result;
 
        /* check against invalid parameters and divide-by-zero   */
        BUG_ON(p >  1000000);           /* p must not exceed 100%   */
@@ -650,15 +646,17 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
                f = tfrc_calc_x_lookup[index][0];
        }
 
-       /* The following computes X = s/(R*f(p)) in bytes per second. Since f(p)
-        * and R are both scaled by 1000000, we need to multiply by 1000000^2.
-        * ==> DO NOT alter this unless you test against overflow on 32 bit   */
-       tmp1 = ((u64)s * 100000000);
-       tmp2 = ((u64)R * (u64)f);
-       do_div(tmp2, 10000);
-       do_div(tmp1, tmp2); 
-
-       return (u32)tmp1; 
+       /*
+        * Compute X = s/(R*f(p)) in bytes per second.
+        * Since f(p) and R are both scaled by 1000000, we need to multiply by
+        * 1000000^2. To avoid overflow, the result is computed in two stages.
+        * This works under almost all reasonable operational conditions and
+        * for a wide range of parameters. Should some unusual combination of
+        * parameters nevertheless overflow, scaled_div32 catches this and
+        * returns UINT_MAX, so the result saturates instead of wrapping.
+        */
+       result = scaled_div(s, R);
+       return scaled_div32(result, f);
 }
 
 EXPORT_SYMBOL_GPL(tfrc_calc_x);
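
Worked through with concrete numbers: for s = 1460 bytes, R = 200000 usecs and f(p) scaled to 300000 (f = 0.3), the first stage gives 1460 * 10^6 / 200000 = 7300 and the second 7300 * 10^6 / 300000 = 24333, matching X = s/(R*f) = 1460/(0.2 * 0.3) ≈ 24333 bytes/s.
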
index 68886986c8e4006165ff3fffc3f6c7138c4e5546..a0900bf98e6bbaf223f6cc3a02b500772ecc1781 100644 (file)
@@ -80,8 +80,6 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
 
 #define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
 
-#define DCCP_XMIT_TIMEO 30000 /* Time/msecs for blocking transmit per packet */
-
 /* sysctl variables for DCCP */
 extern int  sysctl_dccp_request_retries;
 extern int  sysctl_dccp_retries1;
@@ -434,6 +432,7 @@ static inline void timeval_sub_usecs(struct timeval *tv,
                tv->tv_sec--;
                tv->tv_usec += USEC_PER_SEC;
        }
+       DCCP_BUG_ON(tv->tv_sec < 0);
 }
 
 #ifdef CONFIG_SYSCTL
index 4dc487f27a1fff1a0d8f01bb8146a337f01ec376..95b6927ec6530eef17fab12cc5784d101fa7ded9 100644 (file)
@@ -329,7 +329,7 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
        switch (type) {
        case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break;
        case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break;
-       default:             DCCP_WARN("invalid type %d\n", type); return;
+       default:             DCCP_WARN("invalid type %d\n", type); return;
 
        }
        opt->dccpop_feat = feature;
@@ -427,7 +427,7 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature,
        switch (type) {
        case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break;
        case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break;
-       default:              DCCP_WARN("invalid type %d\n", type);
+       default:              DCCP_WARN("invalid type %d\n", type);
                              return 1;
 
        }
@@ -610,7 +610,7 @@ const char *dccp_feat_typename(const u8 type)
        case DCCPO_CHANGE_R:  return("ChangeR");
        case DCCPO_CONFIRM_R: return("ConfirmR");
        /* the following case must not appear in feature negotation  */
-       default:              dccp_pr_debug("unknown type %d [BUG!]\n", type);
+       default:              dccp_pr_debug("unknown type %d [BUG!]\n", type);
        }
        return NULL;
 }
index 7371a2f3acf4f2826f4b47124a56fc3d0879af37..565bc80557ceb79139ea96c930652def3a56237d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  net/dccp/input.c
- * 
+ *
  *  An implementation of the DCCP protocol
  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
@@ -82,7 +82,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
         *        Otherwise,
         *           Drop packet and return
         */
-       if (dh->dccph_type == DCCP_PKT_SYNC || 
+       if (dh->dccph_type == DCCP_PKT_SYNC ||
            dh->dccph_type == DCCP_PKT_SYNCACK) {
                if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              dp->dccps_awl, dp->dccps_awh) &&
@@ -185,8 +185,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                dccp_rcv_close(sk, skb);
                return 0;
        case DCCP_PKT_REQUEST:
-               /* Step 7 
-                *   or (S.is_server and P.type == Response)
+               /* Step 7
+                *   or (S.is_server and P.type == Response)
                 *   or (S.is_client and P.type == Request)
                 *   or (S.state >= OPEN and P.type == Request
                 *      and P.seqno >= S.OSR)
@@ -248,8 +248,18 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                            DCCP_ACKVEC_STATE_RECEIVED))
                goto discard;
 
-       ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
-       ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+       /*
+        * Deliver to the CCID module in charge.
+        * FIXME: Currently DCCP operates one-directional only, i.e. a listening
+        *        server is not at the same time a connecting client. There is
+        *        not much sense in delivering to both rx/tx sides at the moment
+        *        (only one is active at a time); when moving to bidirectional
+        *        service, this needs to be revised.
+        */
+       if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
+               ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
+       else
+               ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
 
        return __dccp_rcv_established(sk, skb, dh, len);
 discard:
@@ -264,7 +274,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
                                               const struct dccp_hdr *dh,
                                               const unsigned len)
 {
-       /* 
+       /*
         *  Step 4: Prepare sequence numbers in REQUEST
         *     If S.state == REQUEST,
         *        If (P.type == Response or P.type == Reset)
@@ -332,7 +342,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
                 *            from the Response * /
                 *        S.state := PARTOPEN
                 *        Set PARTOPEN timer
-                *        Continue with S.state == PARTOPEN
+                *        Continue with S.state == PARTOPEN
                 *        / * Step 12 will send the Ack completing the
                 *            three-way handshake * /
                 */
@@ -363,7 +373,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
                         */
                        __kfree_skb(skb);
                        return 0;
-               } 
+               }
                dccp_send_ack(sk);
                return -1;
        }
@@ -371,7 +381,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
 out_invalid_packet:
        /* dccp_v4_do_rcv will send a reset */
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
-       return 1; 
+       return 1;
 }
 
 static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
@@ -478,14 +488,17 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                        dccp_event_ack_recv(sk, skb);
 
-               if (dccp_msk(sk)->dccpms_send_ack_vector &&
+               if (dccp_msk(sk)->dccpms_send_ack_vector &&
                    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
-                                   DCCP_SKB_CB(skb)->dccpd_seq,
-                                   DCCP_ACKVEC_STATE_RECEIVED))
-                       goto discard;
+                                   DCCP_SKB_CB(skb)->dccpd_seq,
+                                   DCCP_ACKVEC_STATE_RECEIVED))
+                       goto discard;
 
-               ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
-               ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+               /* XXX see the comments in dccp_rcv_established about this */
+               if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
+                       ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
+               else
+                       ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
        }
 
        /*
@@ -567,7 +580,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                }
        }
 
-       if (!queued) { 
+       if (!queued) {
 discard:
                __kfree_skb(skb);
        }
index ff81679c9f1743970455cf387141ca4fb50700d2..90c74b4adb7396443a36d8124633e2d22abd5551 100644 (file)
@@ -157,7 +157,7 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
        /* We don't check in the destentry if pmtu discovery is forbidden
         * on this route. We just assume that no packet_to_big packets
         * are send back when pmtu discovery is not active.
-        * There is a small race when the user changes this flag in the
+        * There is a small race when the user changes this flag in the
         * route, but I think that's acceptable.
         */
        if ((dst = __sk_dst_check(sk, 0)) == NULL)
@@ -467,7 +467,7 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
                            .uli_u = { .ports =
                                       { .sport = dccp_hdr(skb)->dccph_dport,
                                         .dport = dccp_hdr(skb)->dccph_sport }
-                                    }
+                                    }
                          };
 
        security_skb_classify_flow(skb, &fl);
@@ -595,7 +595,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        struct inet_request_sock *ireq;
        struct request_sock *req;
        struct dccp_request_sock *dreq;
-       const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
+       const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
 
@@ -609,7 +609,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        if (dccp_bad_service_code(sk, service)) {
                reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
-       }
+       }
        /*
         * TW buckets are converted to open requests without
         * limitations, they conserve resources and peer is
@@ -644,7 +644,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->rmt_addr = skb->nh.iph->saddr;
        ireq->opt       = NULL;
 
-       /* 
+       /*
         * Step 3: Process LISTEN state
         *
         * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
@@ -846,15 +846,15 @@ static int dccp_v4_rcv(struct sk_buff *skb)
        }
 
        /* Step 2:
-        *      Look up flow ID in table and get corresponding socket */
+        *      Look up flow ID in table and get corresponding socket */
        sk = __inet_lookup(&dccp_hashinfo,
                           skb->nh.iph->saddr, dh->dccph_sport,
                           skb->nh.iph->daddr, dh->dccph_dport,
                           inet_iif(skb));
 
-       /* 
+       /*
         * Step 2:
-        *      If no socket ...
+        *      If no socket ...
         */
        if (sk == NULL) {
                dccp_pr_debug("failed to look up flow ID in table and "
@@ -862,9 +862,9 @@ static int dccp_v4_rcv(struct sk_buff *skb)
                goto no_dccp_socket;
        }
 
-       /* 
+       /*
         * Step 2:
-        *      ... or S.state == TIMEWAIT,
+        *      ... or S.state == TIMEWAIT,
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
@@ -876,8 +876,8 @@ static int dccp_v4_rcv(struct sk_buff *skb)
 
        /*
         * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
-        *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
-        *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
+        *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
+        *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
         */
        min_cov = dccp_sk(sk)->dccps_pcrlen;
        if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov))  {
@@ -900,7 +900,7 @@ no_dccp_socket:
                goto discard_it;
        /*
         * Step 2:
-        *      If no socket ...
+        *      If no socket ...
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
index c7aaa2574f52d3036f28dcb8ba896e914800406b..6b91a9dd04111298c96127ea49e18d864602e9f9 100644 (file)
@@ -77,7 +77,7 @@ static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
 }
 
 static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
-                                                 __be16 sport, __be16 dport   )
+                                                 __be16 sport, __be16 dport   )
 {
        return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
 }
@@ -329,7 +329,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
        skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
                        GFP_ATOMIC);
        if (skb == NULL)
-               return;
+               return;
 
        skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
 
@@ -353,7 +353,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 
        dccp_csum_outgoing(skb);
        dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr,
-                                                     &rxskb->nh.ipv6h->daddr);
+                                                     &rxskb->nh.ipv6h->daddr);
 
        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
@@ -424,7 +424,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        struct dccp_request_sock *dreq;
        struct inet6_request_sock *ireq6;
        struct ipv6_pinfo *np = inet6_sk(sk);
-       const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
+       const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
 
@@ -437,7 +437,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (dccp_bad_service_code(sk, service)) {
                reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
-       }
+       }
        /*
         * There are no SYN attacks on IPv6, yet...
         */
@@ -787,7 +787,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                 * otherwise we just shortcircuit this and continue with
                 * the new socket..
                 */
-               if (nsk != sk) {
+               if (nsk != sk) {
                        if (dccp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb != NULL)
@@ -843,14 +843,14 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
 
        /* Step 2:
-        *      Look up flow ID in table and get corresponding socket */
+        *      Look up flow ID in table and get corresponding socket */
        sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
                            dh->dccph_sport,
                            &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
                            inet6_iif(skb));
        /*
         * Step 2:
-        *      If no socket ...
+        *      If no socket ...
         */
        if (sk == NULL) {
                dccp_pr_debug("failed to look up flow ID in table and "
@@ -860,7 +860,7 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
 
        /*
         * Step 2:
-        *      ... or S.state == TIMEWAIT,
+        *      ... or S.state == TIMEWAIT,
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
@@ -872,8 +872,8 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
 
        /*
         * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
-        *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
-        *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
+        *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
+        *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
         */
        min_cov = dccp_sk(sk)->dccps_pcrlen;
        if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov))  {
@@ -893,7 +893,7 @@ no_dccp_socket:
                goto discard_it;
        /*
         * Step 2:
-        *      If no socket ...
+        *      If no socket ...
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
index 4c9e26775f72ddad7a1c0f9f53c7c9656247e91f..6656bb497c7bde44aa37520cb5e65781b24179a6 100644 (file)
@@ -182,7 +182,7 @@ out_free:
 
 EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
 
-/* 
+/*
  * Process an incoming packet for RESPOND sockets represented
  * as an request_sock.
  */
index f398b43bc055fef04bd4f0fa72adaa06d73ca7be..c03ba61eb6da3c37a1bd225a24b566fdd2e89434 100644 (file)
@@ -557,11 +557,6 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
                        return -1;
                dp->dccps_hc_rx_insert_options = 0;
        }
-       if (dp->dccps_hc_tx_insert_options) {
-               if (ccid_hc_tx_insert_options(dp->dccps_hc_tx_ccid, sk, skb))
-                       return -1;
-               dp->dccps_hc_tx_insert_options = 0;
-       }
 
        /* Feature negotiation */
        /* Data packets can't do feat negotiation */
index 400c30b6fcae4d1e0357188d2081e60d06072e55..824569659083825304d8c1d38413cbc186466014 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  net/dccp/output.c
- * 
+ *
  *  An implementation of the DCCP protocol
  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
@@ -175,14 +175,12 @@ void dccp_write_space(struct sock *sk)
 /**
  * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
  * @sk: socket to wait for
- * @timeo: for how long
  */
-static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
-                             long *timeo)
+static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        DEFINE_WAIT(wait);
-       long delay;
+       unsigned long delay;
        int rc;
 
        while (1) {
@@ -190,8 +188,6 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
 
                if (sk->sk_err)
                        goto do_error;
-               if (!*timeo)
-                       goto do_nonblock;
                if (signal_pending(current))
                        goto do_interrupted;
 
@@ -199,12 +195,9 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
                if (rc <= 0)
                        break;
                delay = msecs_to_jiffies(rc);
-               if (delay > *timeo || delay < 0)
-                       goto do_nonblock;
-
                sk->sk_write_pending++;
                release_sock(sk);
-               *timeo -= schedule_timeout(delay);
+               schedule_timeout(delay);
                lock_sock(sk);
                sk->sk_write_pending--;
        }
@@ -215,11 +208,8 @@ out:
 do_error:
        rc = -EPIPE;
        goto out;
-do_nonblock:
-       rc = -EAGAIN;
-       goto out;
 do_interrupted:
-       rc = sock_intr_errno(*timeo);
+       rc = -EINTR;
        goto out;
 }
 
@@ -240,8 +230,6 @@ void dccp_write_xmit(struct sock *sk, int block)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
-       long timeo = DCCP_XMIT_TIMEO;   /* If a packet is taking longer than
-                                          this we have other issues */
 
        while ((skb = skb_peek(&sk->sk_write_queue))) {
                int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
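Note: the hunk above peeks the next packet off the write queue and asks the CCID whether it may be sent; the hunks that follow rework what is done with the CCID's answer. As a rough guide to that logic, here is a toy userspace sketch. toy_write_xmit() and its arguments are invented, and the return-value mapping (0 = send now, positive = delay in milliseconds, negative = discard) is a reading of this diff rather than a definitive statement of the CCID interface.

#include <stdio.h>

static void toy_write_xmit(int ccid_verdict, int block)
{
        if (ccid_verdict == 0) {
                puts("send the packet now");
        } else if (ccid_verdict > 0) {
                if (block)
                        printf("sleep ~%d ms, then ask the CCID again\n",
                               ccid_verdict);
                else
                        printf("arm the xmit timer for %d ms and return\n",
                               ccid_verdict);
        } else {
                puts("packet discarded");       /* mirrors the new debug message */
        }
}

int main(void)
{
        toy_write_xmit(0, 1);
        toy_write_xmit(10, 1);
        toy_write_xmit(10, 0);
        toy_write_xmit(-1, 0);
        return 0;
}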
@@ -251,11 +239,9 @@ void dccp_write_xmit(struct sock *sk, int block)
                                sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                                msecs_to_jiffies(err)+jiffies);
                                break;
-                       } else {
-                               err = dccp_wait_for_ccid(sk, skb, &timeo);
-                               timeo = DCCP_XMIT_TIMEO;
-                       }
-                       if (err)
+                       } else
+                               err = dccp_wait_for_ccid(sk, skb);
+                       if (err && err != -EINTR)
                                DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
                }
 
@@ -281,8 +267,10 @@ void dccp_write_xmit(struct sock *sk, int block)
                        if (err)
                                DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
                                         err);
-               } else
+               } else {
+                       dccp_pr_debug("packet discarded\n");
                        kfree(skb);
+               }
        }
 }
 
@@ -350,7 +338,6 @@ EXPORT_SYMBOL_GPL(dccp_make_response);
 
 static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                       const enum dccp_reset_codes code)
-                                  
 {
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
@@ -431,14 +418,14 @@ static inline void dccp_connect_init(struct sock *sk)
        
        dccp_sync_mss(sk, dst_mtu(dst));
 
-       /*
+       /*
         * SWL and AWL are initially adjusted so that they are not less than
         * the initial Sequence Numbers received and sent, respectively:
         *      SWL := max(GSR + 1 - floor(W/4), ISR),
         *      AWL := max(GSS - W' + 1, ISS).
         * These adjustments MUST be applied only at the beginning of the
         * connection.
-        */
+        */
        dccp_update_gss(sk, dp->dccps_iss);
        dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
 
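Note: the comment re-flowed in the hunk above spells out how SWL and AWL are initialised. A tiny worked example, using made-up numbers and invented variable names, shows that at connection start (GSR == ISR, GSS == ISS) both formulas clamp to ISR and ISS; real DCCP sequence numbers are 48-bit and compared circularly, which this toy ignores.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t isr = 1000, gsr = 1000;   /* initial / greatest seqno received */
        uint64_t iss = 5000, gss = 5000;   /* initial / greatest seqno sent     */
        uint64_t w = 100, w_prime = 100;   /* sequence window feature values    */

        uint64_t swl = gsr + 1 - w / 4;    /* SWL := max(GSR + 1 - floor(W/4), ISR) */
        if (swl < isr)
                swl = isr;

        uint64_t awl = gss - w_prime + 1;  /* AWL := max(GSS - W' + 1, ISS) */
        if (awl < iss)
                awl = iss;

        printf("SWL=%" PRIu64 " AWL=%" PRIu64 "\n", swl, awl);  /* SWL=1000 AWL=5000 */
        return 0;
}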
index 5ec47d9ee447e226a482010a13c3d97aa773cd26..63b3fa20e14bbb718d3baa9a7a16c2bcb1a503bd 100644 (file)
@@ -196,7 +196,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
                                                      sk, GFP_KERNEL);
                dp->dccps_hc_tx_ccid = ccid_hc_tx_new(dmsk->dccpms_tx_ccid,
                                                      sk, GFP_KERNEL);
-               if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
+               if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
                             dp->dccps_hc_tx_ccid == NULL)) {
                        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
                        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
@@ -390,7 +390,7 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;
 
-       if (service == DCCP_SERVICE_INVALID_VALUE || 
+       if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;
 
@@ -830,7 +830,7 @@ EXPORT_SYMBOL_GPL(inet_dccp_listen);
 static const unsigned char dccp_new_state[] = {
        /* current state:   new state:      action:     */
        [0]               = DCCP_CLOSED,
-       [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
+       [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_REQUESTING] = DCCP_CLOSED,
        [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_LISTEN]     = DCCP_CLOSED,
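Note: the dccp_new_state[] entries in the hunk above pack a next state together with an optional DCCP_ACTION_FIN flag in a single byte. The sketch below illustrates only that packing idea; every TOY_* constant and the bit layout are invented for the example and do not match the kernel's values.

#include <stdio.h>

enum toy_state { TOY_CLOSED, TOY_OPEN, TOY_CLOSING, TOY_LISTEN };

#define TOY_ACTION_FIN  0x80            /* "also send a Close" marker bit */
#define TOY_STATE_MASK  0x7f            /* low bits hold the next state   */

static const unsigned char toy_new_state[] = {
        [TOY_OPEN]    = TOY_CLOSING | TOY_ACTION_FIN,   /* close actively */
        [TOY_LISTEN]  = TOY_CLOSED,                     /* just tear down */
        [TOY_CLOSING] = TOY_CLOSING,
        [TOY_CLOSED]  = TOY_CLOSED,
};

int main(void)
{
        unsigned char next = toy_new_state[TOY_OPEN];

        printf("next state=%d, send close=%s\n",
               next & TOY_STATE_MASK,
               (next & TOY_ACTION_FIN) ? "yes" : "no");
        return 0;
}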
index e8f519e7f481133f7b3b50c5d317e7e7f3628cd3..e5348f369c60a09055e4f7b464d9e3520eb95f35 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  net/dccp/timer.c
- * 
+ *
  *  An implementation of the DCCP protocol
  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
@@ -102,13 +102,13 @@ static void dccp_retransmit_timer(struct sock *sk)
         * sk->sk_send_head has to have one skb with
         * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
         * packet types. The only packets eligible for retransmission are:
-        *      -- Requests in client-REQUEST  state (sec. 8.1.1)
-        *      -- Acks     in client-PARTOPEN state (sec. 8.1.5)
-        *      -- CloseReq in server-CLOSEREQ state (sec. 8.3)
-        *      -- Close    in   node-CLOSING  state (sec. 8.3)                */
+        *      -- Requests in client-REQUEST  state (sec. 8.1.1)
+        *      -- Acks     in client-PARTOPEN state (sec. 8.1.5)
+        *      -- CloseReq in server-CLOSEREQ state (sec. 8.3)
+        *      -- Close    in   node-CLOSING  state (sec. 8.3)                */
        BUG_TRAP(sk->sk_send_head != NULL);
 
-       /* 
+       /*
         * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was
         * sent, no need to retransmit, this sock is dead.
         */
@@ -200,7 +200,7 @@ static void dccp_keepalive_timer(unsigned long data)
        /* Only process if socket is not in use. */
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
-               /* Try again later. */ 
+               /* Try again later. */
                inet_csk_reset_keepalive_timer(sk, HZ / 20);
                goto out;
        }
index 91a075edd68e379fc8fcad685ac1ecd53c4801b7..7ea2d981a9328490df6dc684b6f1768fe0304f3d 100644 (file)
@@ -657,7 +657,7 @@ static void sync_master_loop(void)
                if (stop_master_sync)
                        break;
 
-               ssleep(1);
+               msleep_interruptible(1000);
        }
 
        /* clean up the sync_buff queue */
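Note: the ssleep(1) calls in this file become msleep_interruptible(1000), presumably so a sync thread with a pending signal wakes immediately instead of always sleeping out the full second. A userspace analogy of that pattern, built on POSIX nanosleep() and a signal flag (nothing here is kernel API):

#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

static volatile sig_atomic_t stop;

static void on_term(int sig) { (void)sig; stop = 1; }

int main(void)
{
        struct sigaction sa = { 0 };
        sa.sa_handler = on_term;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGTERM, &sa, NULL);

        while (!stop) {
                /* ... one round of sync work would go here ... */
                struct timespec ts = { 1, 0 };  /* 1 second, like ssleep(1) */
                if (nanosleep(&ts, NULL) == -1 && errno == EINTR)
                        continue;               /* woken early: re-check 'stop' now */
        }
        puts("sync loop stopped");
        return 0;
}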
@@ -714,7 +714,7 @@ static void sync_backup_loop(void)
                if (stop_backup_sync)
                        break;
 
-               ssleep(1);
+               msleep_interruptible(1000);
        }
 
        /* release the sending multicast socket */
@@ -826,7 +826,7 @@ static int fork_sync_thread(void *startup)
        if ((pid = kernel_thread(sync_thread, startup, 0)) < 0) {
                IP_VS_ERR("could not create sync_thread due to %d... "
                          "retrying.\n", pid);
-               ssleep(1);
+               msleep_interruptible(1000);
                goto repeat;
        }
 
@@ -849,10 +849,12 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 
        ip_vs_sync_state |= state;
        if (state == IP_VS_STATE_MASTER) {
-               strlcpy(ip_vs_master_mcast_ifn, mcast_ifn, sizeof(ip_vs_master_mcast_ifn));
+               strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
+                       sizeof(ip_vs_master_mcast_ifn));
                ip_vs_master_syncid = syncid;
        } else {
-               strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn, sizeof(ip_vs_backup_mcast_ifn));
+               strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
+                       sizeof(ip_vs_backup_mcast_ifn));
                ip_vs_backup_syncid = syncid;
        }
 
@@ -860,7 +862,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
        if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
                IP_VS_ERR("could not create fork_sync_thread due to %d... "
                          "retrying.\n", pid);
-               ssleep(1);
+               msleep_interruptible(1000);
                goto repeat;
        }
 
@@ -880,7 +882,8 @@ int stop_sync_thread(int state)
 
        IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
        IP_VS_INFO("stopping sync thread %d ...\n",
-                  (state == IP_VS_STATE_MASTER) ? sync_master_pid : sync_backup_pid);
+                  (state == IP_VS_STATE_MASTER) ?
+                  sync_master_pid : sync_backup_pid);
 
        __set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&stop_sync_wait, &wait);