Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author     Linus Torvalds <torvalds@linux-foundation.org>    Fri, 13 Aug 2010 17:39:30 +0000 (10:39 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>    Fri, 13 Aug 2010 17:39:30 +0000 (10:39 -0700)
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (30 commits)
  perf: Add back list_head data types
  perf ui hist browser: Fixup key bindings
  perf ui browser: Add ui_browser__show counterpart: __hide
  perf annotate: Cycle thru sorted lines with samples
  perf ui: Make SPACE work as PGDN in all browsers
  perf annotate: Sort by hottest lines in the TUI
  perf ui: Complete the breakdown of util/newt.c
  perf ui: Move hists browser to util/ui/browsers/
  perf symbols: Ignore mapping symbols on ARM
  perf ui: Move map browser to util/ui/browsers/
  perf ui: Move annotate browser to util/ui/browsers/
  perf ui: Move ui_progress routines to separate file in util/ui/
  perf ui: Move ui_helpline routines to separate file in util/ui/
  perf ui: Shorten ui_browser member names
  perf, x86: P4 PMU -- update nmi irq statistics and unmask lvt entry properly
  perf ui: Start breaking down newt.c into multiple files
  perf tui: Introduce list_head based generic ui_browser refresh routine
  perf probe: Fix memory leaks in add_perf_probe_events
  perf probe: Fix to copy the type for raw parameters
  perf report: Speed up exit path
  ...

73 files changed:
arch/x86/include/asm/page.h
arch/x86/include/asm/uv/uv_bau.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/tlb_uv.c
arch/x86/lib/atomic64_386_32.S
arch/x86/mm/fault.c
drivers/isdn/hardware/avm/c4.c
drivers/isdn/hardware/avm/t1pci.c
drivers/isdn/hardware/mISDN/mISDNinfineon.c
drivers/net/caif/caif_spi_slave.c
drivers/net/phy/Kconfig
drivers/net/phy/phy.c
drivers/net/qlcnic/qlcnic_main.c
drivers/net/usb/usbnet.c
drivers/net/wan/farsync.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/p54/p54pci.c
drivers/s390/net/claw.c
drivers/s390/net/claw.h
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/ctcm_main.h
drivers/s390/net/ctcm_mpc.c
drivers/s390/net/ctcm_sysfs.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/mntpt.c
fs/afs/proc.c
fs/afs/super.c
fs/cifs/README
fs/file_table.c
fs/nfs/Kconfig
fs/nfs/dns_resolve.c
fs/nfs/dns_resolve.h
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fsnotify.c
fs/notify/inotify/inotify_fsnotify.c
fs/notify/notification.c
include/linux/etherdevice.h
include/linux/fsnotify.h
include/linux/fsnotify_backend.h
include/linux/netpoll.h
include/net/bluetooth/l2cap.h
include/net/sock.h
kernel/audit_watch.c
mm/memory.c
net/bluetooth/l2cap.c
net/caif/cfpkt_skbuff.c
net/can/bcm.c
net/dns_resolver/dns_key.c
net/dns_resolver/dns_query.c
net/dsa/Kconfig
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/wireless/mlme.c

arch/x86/include/asm/page.h
index 625c3f0e741aab7bd75d839cadf7084c303baa61..8ca82839288a35e326f43fbac24a8384eb497652 100644
@@ -37,6 +37,13 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 #define __pa_nodebug(x)        __phys_addr_nodebug((unsigned long)(x))
 /* __pa_symbol should be used for C visible symbols.
    This seems to be the official gcc blessed way to do such arithmetic. */
+/*
+ * We need __phys_reloc_hide() here because gcc may assume that there is no
+ * overflow during __pa() calculation and can optimize it unexpectedly.
+ * Newer versions of gcc provide -fno-strict-overflow switch to handle this
+ * case properly. Once all supported versions of gcc understand it, we can
+ * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
+ */
 #define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
 
 #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
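
Note on the new comment above: __phys_reloc_hide() is a thin wrapper around
the kernel's RELOC_HIDE() macro, which launders a value through an empty asm
so gcc can no longer apply its no-overflow assumption to the following
pointer arithmetic. A minimal standalone sketch of the same trick (the
reloc_hide name and the sample address are illustrative, not the kernel's
exact definition):

#include <stdio.h>

/* The empty asm makes __v opaque to the optimizer, so gcc cannot assume
 * that (__v + off) never overflows and "optimize" checks away. */
#define reloc_hide(val, off)						\
	({ unsigned long __v = (val);					\
	   __asm__("" : "+r"(__v));					\
	   __v + (off); })

int main(void)
{
	unsigned long sym = 0xffffffff81000000UL; /* hypothetical symbol address */

	printf("%#lx\n", reloc_hide(sym, 0));
	return 0;
}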
arch/x86/include/asm/uv/uv_bau.h
index aa558ac0306e5859bc41863861cc5a1620cf0c1e..42d412fd8b02cdd369b5cc8db5a67aa5cc7a0770 100644
@@ -34,6 +34,7 @@
  */
 
 #define UV_ITEMS_PER_DESCRIPTOR                8
+/* the 'throttle' to prevent the hardware stay-busy bug */
 #define MAX_BAU_CONCURRENT             3
 #define UV_CPUS_PER_ACT_STATUS         32
 #define UV_ACT_STATUS_MASK             0x3
 #define UV_DESC_BASE_PNODE_SHIFT       49
 #define UV_PAYLOADQ_PNODE_SHIFT                49
 #define UV_PTC_BASENAME                        "sgi_uv/ptc_statistics"
+#define UV_BAU_BASENAME                        "sgi_uv/bau_tunables"
+#define UV_BAU_TUNABLES_DIR            "sgi_uv"
+#define UV_BAU_TUNABLES_FILE           "bau_tunables"
+#define WHITESPACE                     " \t\n"
 #define uv_physnodeaddr(x)             ((__pa((unsigned long)(x)) & uv_mmask))
 #define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
 #define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x0000000009UL
+/* [19:16] SOFT_ACK timeout period  19: 1 is urgency 7  17:16 1 is multiplier */
+#define BAU_MISC_CONTROL_MULT_MASK 3
+
+#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
+/* [30:28] URGENCY_7  an index into a table of times */
+#define BAU_URGENCY_7_SHIFT 28
+#define BAU_URGENCY_7_MASK 7
+
+#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
+/* [45:40] BAU - BAU transaction timeout select - a multiplier */
+#define BAU_TRANS_SHIFT 40
+#define BAU_TRANS_MASK 0x3f
 
 /*
  * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
 #define DESC_STATUS_SOURCE_TIMEOUT     3
 
 /*
- * source side threshholds at which message retries print a warning
- */
-#define SOURCE_TIMEOUT_LIMIT           20
-#define DESTINATION_TIMEOUT_LIMIT      20
-
-/*
- * misc. delays, in microseconds
+ * delay for 'plugged' timeout retries, in microseconds
  */
-#define THROTTLE_DELAY                 10
-#define TIMEOUT_DELAY                  10
-#define BIOS_TO                                1000
-/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
+#define PLUGGED_DELAY                  10
 
 /*
  * threshholds at which to use IPI to free resources
  */
+/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
 #define PLUGSB4RESET 100
-#define TIMEOUTSB4RESET 100
+/* after this many consecutive timeouts, use IPI to release resources */
+#define TIMEOUTSB4RESET 1
+/* at this number uses of IPI to release resources, giveup the request */
+#define IPI_RESET_LIMIT 1
+/* after this # consecutive successes, bump up the throttle if it was lowered */
+#define COMPLETE_THRESHOLD 5
 
 /*
  * number of entries in the destination side payload queue
 #define FLUSH_GIVEUP                   3
 #define FLUSH_COMPLETE                 4
 
+/*
+ * tuning the action when the numalink network is extremely delayed
+ */
+#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in microseconds */
+#define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */
+#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */
+
 /*
  * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
  * If the 'multilevel' flag in the header portion of the descriptor
@@ -300,37 +321,16 @@ struct bau_payload_queue_entry {
        /* bytes 24-31 */
 };
 
-/*
- * one per-cpu; to locate the software tables
- */
-struct bau_control {
-       struct bau_desc *descriptor_base;
+struct msg_desc {
+       struct bau_payload_queue_entry *msg;
+       int msg_slot;
+       int sw_ack_slot;
        struct bau_payload_queue_entry *va_queue_first;
        struct bau_payload_queue_entry *va_queue_last;
-       struct bau_payload_queue_entry *bau_msg_head;
-       struct bau_control *uvhub_master;
-       struct bau_control *socket_master;
-       unsigned long timeout_interval;
-       atomic_t active_descriptor_count;
-       int max_concurrent;
-       int max_concurrent_constant;
-       int retry_message_scans;
-       int plugged_tries;
-       int timeout_tries;
-       int ipi_attempts;
-       int conseccompletes;
-       short cpu;
-       short uvhub_cpu;
-       short uvhub;
-       short cpus_in_socket;
-       short cpus_in_uvhub;
-       unsigned short message_number;
-       unsigned short uvhub_quiesce;
-       short socket_acknowledge_count[DEST_Q_SIZE];
-       cycles_t send_message;
-       spinlock_t masks_lock;
-       spinlock_t uvhub_lock;
-       spinlock_t queue_lock;
+};
+
+struct reset_args {
+       int sender;
 };
 
 /*
@@ -344,18 +344,25 @@ struct ptc_stats {
        unsigned long s_dtimeout; /* destination side timeouts */
        unsigned long s_time; /* time spent in sending side */
        unsigned long s_retriesok; /* successful retries */
-       unsigned long s_ntargcpu; /* number of cpus targeted */
-       unsigned long s_ntarguvhub; /* number of uvhubs targeted */
-       unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */
-       unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */
-       unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */
-       unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */
-       unsigned long s_ntarguvhub1; /* number of times == 1 target hub */
+       unsigned long s_ntargcpu; /* total number of cpu's targeted */
+       unsigned long s_ntargself; /* times the sending cpu was targeted */
+       unsigned long s_ntarglocals; /* targets of cpus on the local blade */
+       unsigned long s_ntargremotes; /* targets of cpus on remote blades */
+       unsigned long s_ntarglocaluvhub; /* targets of the local hub */
+       unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
+       unsigned long s_ntarguvhub; /* total number of uvhubs targeted */
+       unsigned long s_ntarguvhub16; /* number of times target hubs >= 16*/
+       unsigned long s_ntarguvhub8; /* number of times target hubs >= 8 */
+       unsigned long s_ntarguvhub4; /* number of times target hubs >= 4 */
+       unsigned long s_ntarguvhub2; /* number of times target hubs >= 2 */
+       unsigned long s_ntarguvhub1; /* number of times target hubs == 1 */
        unsigned long s_resets_plug; /* ipi-style resets from plug state */
        unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
        unsigned long s_busy; /* status stayed busy past s/w timer */
        unsigned long s_throttles; /* waits in throttle */
        unsigned long s_retry_messages; /* retry broadcasts */
+       unsigned long s_bau_reenabled; /* for bau enable/disable */
+       unsigned long s_bau_disabled; /* for bau enable/disable */
        /* destination statistics */
        unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
        unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
@@ -370,6 +377,52 @@ struct ptc_stats {
        unsigned long d_rcanceled; /* number of messages canceled by resets */
 };
 
+/*
+ * one per-cpu; to locate the software tables
+ */
+struct bau_control {
+       struct bau_desc *descriptor_base;
+       struct bau_payload_queue_entry *va_queue_first;
+       struct bau_payload_queue_entry *va_queue_last;
+       struct bau_payload_queue_entry *bau_msg_head;
+       struct bau_control *uvhub_master;
+       struct bau_control *socket_master;
+       struct ptc_stats *statp;
+       unsigned long timeout_interval;
+       unsigned long set_bau_on_time;
+       atomic_t active_descriptor_count;
+       int plugged_tries;
+       int timeout_tries;
+       int ipi_attempts;
+       int conseccompletes;
+       int baudisabled;
+       int set_bau_off;
+       short cpu;
+       short uvhub_cpu;
+       short uvhub;
+       short cpus_in_socket;
+       short cpus_in_uvhub;
+       unsigned short message_number;
+       unsigned short uvhub_quiesce;
+       short socket_acknowledge_count[DEST_Q_SIZE];
+       cycles_t send_message;
+       spinlock_t uvhub_lock;
+       spinlock_t queue_lock;
+       /* tunables */
+       int max_bau_concurrent;
+       int max_bau_concurrent_constant;
+       int plugged_delay;
+       int plugsb4reset;
+       int timeoutsb4reset;
+       int ipi_reset_limit;
+       int complete_threshold;
+       int congested_response_us;
+       int congested_reps;
+       int congested_period;
+       cycles_t period_time;
+       long period_requests;
+};
+
 static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
 {
        return constant_test_bit(uvhub, &dstp->bits[0]);
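
Taken together, the constants above encode a retry-escalation ladder: a
plugged destination is retried after a PLUGGED_DELAY microsecond wait; after
PLUGSB4RESET plugged retries or TIMEOUTSB4RESET timeouts the resources are
released by an IPI-style reset; after IPI_RESET_LIMIT resets the broadcast is
abandoned and the caller falls back to IPI shootdown. A compact sketch of
that policy using the default values (the helper and enum are illustrative,
not the driver's actual control flow):

enum bau_next { BAU_RETRY, BAU_IPI_RESET, BAU_GIVEUP };

/* Decide the next step for a sender, per the defaults defined above. */
static enum bau_next escalate(int plugged_tries, int timeout_tries,
			      int ipi_attempts)
{
	if (ipi_attempts >= 1)			/* IPI_RESET_LIMIT */
		return BAU_GIVEUP;
	if (plugged_tries >= 100 ||		/* PLUGSB4RESET */
	    timeout_tries >= 1)			/* TIMEOUTSB4RESET */
		return BAU_IPI_RESET;
	return BAU_RETRY;			/* after a PLUGGED_DELAY us wait */
}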
arch/x86/kernel/apic/apic.c
index 980508c79082fadaacbc0339c0fe196d6a6dd5ce..e3b534cda49a8097dde55400083d7eeb8f9c694c 100644
@@ -1606,7 +1606,7 @@ void __init init_apic_mappings(void)
                 * acpi lapic path already maps that address in
                 * acpi_register_lapic_address()
                 */
-               if (!acpi_lapic)
+               if (!acpi_lapic && !smp_found_config)
                        set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 
                apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
arch/x86/kernel/mpparse.c
index d86dbf7e54be4384aae5c2c886e3e690780f40f3..d7b6f7fb4fecfd44205d63ab6cfebfb400e8b21a 100644
@@ -274,6 +274,18 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
 
 void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
 
+static void __init smp_register_lapic_address(unsigned long address)
+{
+       mp_lapic_addr = address;
+
+       set_fixmap_nocache(FIX_APIC_BASE, address);
+       if (boot_cpu_physical_apicid == -1U) {
+               boot_cpu_physical_apicid  = read_apic_id();
+               apic_version[boot_cpu_physical_apicid] =
+                        GET_APIC_VERSION(apic_read(APIC_LVR));
+       }
+}
+
 static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 {
        char str[16];
@@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
        if (early)
                return 1;
 
+       /* Initialize the lapic mapping */
+       if (!acpi_lapic)
+               smp_register_lapic_address(mpc->lapic);
+
        if (mpc->oemptr)
                x86_init.mpparse.smp_read_mpc_oem(mpc);
 
arch/x86/kernel/tlb_uv.c
index 7fea555929e24f2f411ea019b426e64c05ac2df0..59efb5390b37bec6d0de735c93483c35ba0ca43a 100644
@@ -8,6 +8,7 @@
  */
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
+#include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 
 #include <asm/irq_vectors.h>
 #include <asm/timer.h>
 
-struct msg_desc {
-       struct bau_payload_queue_entry *msg;
-       int msg_slot;
-       int sw_ack_slot;
-       struct bau_payload_queue_entry *va_queue_first;
-       struct bau_payload_queue_entry *va_queue_last;
+/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
+static int timeout_base_ns[] = {
+               20,
+               160,
+               1280,
+               10240,
+               81920,
+               655360,
+               5242880,
+               167772160
 };
-
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD        0x000000000bUL
-
-static int uv_bau_max_concurrent __read_mostly;
-
+static int timeout_us;
 static int nobau;
+static int baudisabled;
+static spinlock_t disable_lock;
+static cycles_t congested_cycles;
+
+/* tunables: */
+static int max_bau_concurrent = MAX_BAU_CONCURRENT;
+static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
+static int plugged_delay = PLUGGED_DELAY;
+static int plugsb4reset = PLUGSB4RESET;
+static int timeoutsb4reset = TIMEOUTSB4RESET;
+static int ipi_reset_limit = IPI_RESET_LIMIT;
+static int complete_threshold = COMPLETE_THRESHOLD;
+static int congested_response_us = CONGESTED_RESPONSE_US;
+static int congested_reps = CONGESTED_REPS;
+static int congested_period = CONGESTED_PERIOD;
+static struct dentry *tunables_dir;
+static struct dentry *tunables_file;
+
 static int __init setup_nobau(char *arg)
 {
        nobau = 1;
@@ -52,10 +71,6 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
 static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
 
-struct reset_args {
-       int sender;
-};
-
 /*
  * Determine the first node on a uvhub. 'Nodes' are used for kernel
  * memory allocation.
@@ -126,7 +141,7 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
        struct ptc_stats *stat;
 
        msg = mdp->msg;
-       stat = &per_cpu(ptcstats, bcp->cpu);
+       stat = bcp->statp;
        stat->d_retries++;
        /*
         * cancel any message from msg+1 to the retry itself
@@ -146,15 +161,14 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
                        slot2 = msg2 - mdp->va_queue_first;
                        mmr = uv_read_local_mmr
                                (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
-                       msg_res = ((msg2->sw_ack_vector << 8) |
-                                  msg2->sw_ack_vector);
+                       msg_res = msg2->sw_ack_vector;
                        /*
                         * This is a message retry; clear the resources held
                         * by the previous message only if they timed out.
                         * If it has not timed out we have an unexpected
                         * situation to report.
                         */
-                       if (mmr & (msg_res << 8)) {
+                       if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
                                /*
                                 * is the resource timed out?
                                 * make everyone ignore the cancelled message.
@@ -164,9 +178,9 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
                                cancel_count++;
                                uv_write_local_mmr(
                                    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
-                                       (msg_res << 8) | msg_res);
-                       } else
-                               printk(KERN_INFO "note bau retry: no effect\n");
+                                       (msg_res << UV_SW_ACK_NPENDING) |
+                                        msg_res);
+                       }
                }
        }
        if (!cancel_count)
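
The msg_res << UV_SW_ACK_NPENDING test above depends on the layout of the
software-acknowledge MMR: the low byte carries the pending-resource bits and
the byte above it their timed-out shadows, and writing a bit together with
its shadow to the _ALIAS register frees a timed-out resource. A small sketch
of the test (the field layout is an assumption drawn from this diff):

#define UV_SW_ACK_NPENDING	8	/* width of the pending-bit field */

/* Nonzero when any resource in msg_res has its timeout shadow bit set. */
static inline int sw_ack_timed_out(unsigned long mmr, unsigned char msg_res)
{
	return (mmr & ((unsigned long)msg_res << UV_SW_ACK_NPENDING)) != 0;
}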
@@ -190,7 +204,7 @@ static void uv_bau_process_message(struct msg_desc *mdp,
         * This must be a normal message, or retry of a normal message
         */
        msg = mdp->msg;
-       stat = &per_cpu(ptcstats, bcp->cpu);
+       stat = bcp->statp;
        if (msg->address == TLB_FLUSH_ALL) {
                local_flush_tlb();
                stat->d_alltlb++;
@@ -274,7 +288,7 @@ uv_do_reset(void *ptr)
 
        bcp = &per_cpu(bau_control, smp_processor_id());
        rap = (struct reset_args *)ptr;
-       stat = &per_cpu(ptcstats, bcp->cpu);
+       stat = bcp->statp;
        stat->d_resets++;
 
        /*
@@ -302,13 +316,13 @@ uv_do_reset(void *ptr)
                         */
                        mmr = uv_read_local_mmr
                                        (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
-                       msg_res = ((msg->sw_ack_vector << 8) |
-                                                  msg->sw_ack_vector);
+                       msg_res = msg->sw_ack_vector;
                        if (mmr & msg_res) {
                                stat->d_rcanceled++;
                                uv_write_local_mmr(
                                    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
-                                                       msg_res);
+                                       (msg_res << UV_SW_ACK_NPENDING) |
+                                        msg_res);
                        }
                }
        }
@@ -386,17 +400,12 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
        unsigned long mmr_offset, int right_shift, int this_cpu,
        struct bau_control *bcp, struct bau_control *smaster, long try)
 {
-       int relaxes = 0;
        unsigned long descriptor_status;
-       unsigned long mmr;
-       unsigned long mask;
        cycles_t ttime;
-       cycles_t timeout_time;
-       struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
+       struct ptc_stats *stat = bcp->statp;
        struct bau_control *hmaster;
 
        hmaster = bcp->uvhub_master;
-       timeout_time = get_cycles() + bcp->timeout_interval;
 
        /* spin on the status MMR, waiting for it to go idle */
        while ((descriptor_status = (((unsigned long)
@@ -423,7 +432,8 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
                         * pending.  In that case hardware returns the
                         * ERROR that looks like a destination timeout.
                         */
-                       if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
+                       if (cycles_2_us(ttime - bcp->send_message) <
+                                                       timeout_us) {
                                bcp->conseccompletes = 0;
                                return FLUSH_RETRY_PLUGGED;
                        }
@@ -435,26 +445,6 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
                         * descriptor_status is still BUSY
                         */
                        cpu_relax();
-                       relaxes++;
-                       if (relaxes >= 10000) {
-                               relaxes = 0;
-                               if (get_cycles() > timeout_time) {
-                                       quiesce_local_uvhub(hmaster);
-
-                                       /* single-thread the register change */
-                                       spin_lock(&hmaster->masks_lock);
-                                       mmr = uv_read_local_mmr(mmr_offset);
-                                       mask = 0UL;
-                                       mask |= (3UL < right_shift);
-                                       mask = ~mask;
-                                       mmr &= mask;
-                                       uv_write_local_mmr(mmr_offset, mmr);
-                                       spin_unlock(&hmaster->masks_lock);
-                                       end_uvhub_quiesce(hmaster);
-                                       stat->s_busy++;
-                                       return FLUSH_GIVEUP;
-                               }
-                       }
                }
        }
        bcp->conseccompletes++;
@@ -494,56 +484,116 @@ static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
        return 1;
 }
 
+/*
+ * Our retries are blocked by all destination swack resources being
+ * in use, and a timeout is pending. In that case hardware immediately
+ * returns the ERROR that looks like a destination timeout.
+ */
+static void
+destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+                       struct bau_control *hmaster, struct ptc_stats *stat)
+{
+       udelay(bcp->plugged_delay);
+       bcp->plugged_tries++;
+       if (bcp->plugged_tries >= bcp->plugsb4reset) {
+               bcp->plugged_tries = 0;
+               quiesce_local_uvhub(hmaster);
+               spin_lock(&hmaster->queue_lock);
+               uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+               spin_unlock(&hmaster->queue_lock);
+               end_uvhub_quiesce(hmaster);
+               bcp->ipi_attempts++;
+               stat->s_resets_plug++;
+       }
+}
+
+static void
+destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
+                       struct bau_control *hmaster, struct ptc_stats *stat)
+{
+       hmaster->max_bau_concurrent = 1;
+       bcp->timeout_tries++;
+       if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
+               bcp->timeout_tries = 0;
+               quiesce_local_uvhub(hmaster);
+               spin_lock(&hmaster->queue_lock);
+               uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+               spin_unlock(&hmaster->queue_lock);
+               end_uvhub_quiesce(hmaster);
+               bcp->ipi_attempts++;
+               stat->s_resets_timeout++;
+       }
+}
+
+/*
+ * Completions are taking a very long time due to a congested numalink
+ * network.
+ */
+static void
+disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
+{
+       int tcpu;
+       struct bau_control *tbcp;
+
+       /* let only one cpu do this disabling */
+       spin_lock(&disable_lock);
+       if (!baudisabled && bcp->period_requests &&
+           ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
+               /* it becomes this cpu's job to turn on the use of the
+                  BAU again */
+               baudisabled = 1;
+               bcp->set_bau_off = 1;
+               bcp->set_bau_on_time = get_cycles() +
+                       sec_2_cycles(bcp->congested_period);
+               stat->s_bau_disabled++;
+               for_each_present_cpu(tcpu) {
+                       tbcp = &per_cpu(bau_control, tcpu);
+                               tbcp->baudisabled = 1;
+               }
+       }
+       spin_unlock(&disable_lock);
+}
+
 /**
  * uv_flush_send_and_wait
  *
  * Send a broadcast and wait for it to complete.
  *
- * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * The flush_mask contains the cpus the broadcast is to be sent to including
  * cpus that are on the local uvhub.
  *
- * Returns NULL if all flushing represented in the mask was done. The mask
- * is zeroed.
- * Returns @flush_mask if some remote flushing remains to be done. The
- * mask will have some bits still set, representing any cpus on the local
- * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
+ * Returns 0 if all flushing represented in the mask was done.
+ * Returns 1 if it gives up entirely and the original cpu mask is to be
+ * returned to the kernel.
  */
-const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
-                                            struct cpumask *flush_mask,
-                                            struct bau_control *bcp)
+int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+                          struct cpumask *flush_mask, struct bau_control *bcp)
 {
        int right_shift;
-       int uvhub;
-       int bit;
        int completion_status = 0;
        int seq_number = 0;
        long try = 0;
        int cpu = bcp->uvhub_cpu;
        int this_cpu = bcp->cpu;
-       int this_uvhub = bcp->uvhub;
        unsigned long mmr_offset;
        unsigned long index;
        cycles_t time1;
        cycles_t time2;
-       struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
+       cycles_t elapsed;
+       struct ptc_stats *stat = bcp->statp;
        struct bau_control *smaster = bcp->socket_master;
        struct bau_control *hmaster = bcp->uvhub_master;
 
-       /*
-        * Spin here while there are hmaster->max_concurrent or more active
-        * descriptors. This is the per-uvhub 'throttle'.
-        */
        if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
                        &hmaster->active_descriptor_count,
-                       hmaster->max_concurrent)) {
+                       hmaster->max_bau_concurrent)) {
                stat->s_throttles++;
                do {
                        cpu_relax();
                } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
                        &hmaster->active_descriptor_count,
-                       hmaster->max_concurrent));
+                       hmaster->max_bau_concurrent));
        }
-
        while (hmaster->uvhub_quiesce)
                cpu_relax();
 
@@ -557,23 +607,10 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
        }
        time1 = get_cycles();
        do {
-               /*
-                * Every message from any given cpu gets a unique message
-                * sequence number. But retries use that same number.
-                * Our message may have timed out at the destination because
-                * all sw-ack resources are in use and there is a timeout
-                * pending there.  In that case, our last send never got
-                * placed into the queue and we need to persist until it
-                * does.
-                *
-                * Make any retry a type MSG_RETRY so that the destination will
-                * free any resource held by a previous message from this cpu.
-                */
                if (try == 0) {
-                       /* use message type set by the caller the first time */
+                       bau_desc->header.msg_type = MSG_REGULAR;
                        seq_number = bcp->message_number++;
                } else {
-                       /* use RETRY type on all the rest; same sequence */
                        bau_desc->header.msg_type = MSG_RETRY;
                        stat->s_retry_messages++;
                }
@@ -581,50 +618,17 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
                index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
                        bcp->uvhub_cpu;
                bcp->send_message = get_cycles();
-
                uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
-
                try++;
                completion_status = uv_wait_completion(bau_desc, mmr_offset,
                        right_shift, this_cpu, bcp, smaster, try);
 
                if (completion_status == FLUSH_RETRY_PLUGGED) {
-                       /*
-                        * Our retries may be blocked by all destination swack
-                        * resources being consumed, and a timeout pending. In
-                        * that case hardware immediately returns the ERROR
-                        * that looks like a destination timeout.
-                        */
-                       udelay(TIMEOUT_DELAY);
-                       bcp->plugged_tries++;
-                       if (bcp->plugged_tries >= PLUGSB4RESET) {
-                               bcp->plugged_tries = 0;
-                               quiesce_local_uvhub(hmaster);
-                               spin_lock(&hmaster->queue_lock);
-                               uv_reset_with_ipi(&bau_desc->distribution,
-                                                       this_cpu);
-                               spin_unlock(&hmaster->queue_lock);
-                               end_uvhub_quiesce(hmaster);
-                               bcp->ipi_attempts++;
-                               stat->s_resets_plug++;
-                       }
+                       destination_plugged(bau_desc, bcp, hmaster, stat);
                } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
-                       hmaster->max_concurrent = 1;
-                       bcp->timeout_tries++;
-                       udelay(TIMEOUT_DELAY);
-                       if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
-                               bcp->timeout_tries = 0;
-                               quiesce_local_uvhub(hmaster);
-                               spin_lock(&hmaster->queue_lock);
-                               uv_reset_with_ipi(&bau_desc->distribution,
-                                                               this_cpu);
-                               spin_unlock(&hmaster->queue_lock);
-                               end_uvhub_quiesce(hmaster);
-                               bcp->ipi_attempts++;
-                               stat->s_resets_timeout++;
-                       }
+                       destination_timeout(bau_desc, bcp, hmaster, stat);
                }
-               if (bcp->ipi_attempts >= 3) {
+               if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
                        bcp->ipi_attempts = 0;
                        completion_status = FLUSH_GIVEUP;
                        break;
@@ -633,49 +637,36 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
        } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
                 (completion_status == FLUSH_RETRY_TIMEOUT));
        time2 = get_cycles();
-
-       if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
-           && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
-                       hmaster->max_concurrent++;
-
-       /*
-        * hold any cpu not timing out here; no other cpu currently held by
-        * the 'throttle' should enter the activation code
-        */
+       bcp->plugged_tries = 0;
+       bcp->timeout_tries = 0;
+       if ((completion_status == FLUSH_COMPLETE) &&
+           (bcp->conseccompletes > bcp->complete_threshold) &&
+           (hmaster->max_bau_concurrent <
+                                       hmaster->max_bau_concurrent_constant))
+                       hmaster->max_bau_concurrent++;
        while (hmaster->uvhub_quiesce)
                cpu_relax();
        atomic_dec(&hmaster->active_descriptor_count);
-
-       /* guard against cycles wrap */
-       if (time2 > time1)
-               stat->s_time += (time2 - time1);
-       else
-               stat->s_requestor--; /* don't count this one */
+       if (time2 > time1) {
+               elapsed = time2 - time1;
+               stat->s_time += elapsed;
+               if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
+                       bcp->period_requests++;
+                       bcp->period_time += elapsed;
+                       if ((elapsed > congested_cycles) &&
+                           (bcp->period_requests > bcp->congested_reps)) {
+                               disable_for_congestion(bcp, stat);
+                       }
+               }
+       } else
+               stat->s_requestor--;
        if (completion_status == FLUSH_COMPLETE && try > 1)
                stat->s_retriesok++;
        else if (completion_status == FLUSH_GIVEUP) {
-               /*
-                * Cause the caller to do an IPI-style TLB shootdown on
-                * the target cpu's, all of which are still in the mask.
-                */
                stat->s_giveup++;
-               return flush_mask;
-       }
-
-       /*
-        * Success, so clear the remote cpu's from the mask so we don't
-        * use the IPI method of shootdown on them.
-        */
-       for_each_cpu(bit, flush_mask) {
-               uvhub = uv_cpu_to_blade_id(bit);
-               if (uvhub == this_uvhub)
-                       continue;
-               cpumask_clear_cpu(bit, flush_mask);
+               return 1;
        }
-       if (!cpumask_empty(flush_mask))
-               return flush_mask;
-
-       return NULL;
+       return 0;
 }
 
 /**
@@ -707,70 +698,89 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                          struct mm_struct *mm,
                                          unsigned long va, unsigned int cpu)
 {
-       int remotes;
        int tcpu;
        int uvhub;
        int locals = 0;
+       int remotes = 0;
+       int hubs = 0;
        struct bau_desc *bau_desc;
        struct cpumask *flush_mask;
        struct ptc_stats *stat;
        struct bau_control *bcp;
+       struct bau_control *tbcp;
 
+       /* kernel was booted 'nobau' */
        if (nobau)
                return cpumask;
 
        bcp = &per_cpu(bau_control, cpu);
+       stat = bcp->statp;
+
+       /* bau was disabled due to slow response */
+       if (bcp->baudisabled) {
+               /* the cpu that disabled it must re-enable it */
+               if (bcp->set_bau_off) {
+                       if (get_cycles() >= bcp->set_bau_on_time) {
+                               stat->s_bau_reenabled++;
+                               baudisabled = 0;
+                               for_each_present_cpu(tcpu) {
+                                       tbcp = &per_cpu(bau_control, tcpu);
+                                       tbcp->baudisabled = 0;
+                                       tbcp->period_requests = 0;
+                                       tbcp->period_time = 0;
+                               }
+                       }
+               }
+               return cpumask;
+       }
+
        /*
         * Each sending cpu has a per-cpu mask which it fills from the caller's
-        * cpu mask.  Only remote cpus are converted to uvhubs and copied.
+        * cpu mask.  All cpus are converted to uvhubs and copied to the
+        * activation descriptor.
         */
        flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
-       /*
-        * copy cpumask to flush_mask, removing current cpu
-        * (current cpu should already have been flushed by the caller and
-        *  should never be returned if we return flush_mask)
-        */
+       /* don't actually do a shootdown of the local cpu */
        cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
        if (cpu_isset(cpu, *cpumask))
-               locals++;  /* current cpu was targeted */
+               stat->s_ntargself++;
 
        bau_desc = bcp->descriptor_base;
        bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
-
        bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
-       remotes = 0;
+
+       /* cpu statistics */
        for_each_cpu(tcpu, flush_mask) {
                uvhub = uv_cpu_to_blade_id(tcpu);
-               if (uvhub == bcp->uvhub) {
-                       locals++;
-                       continue;
-               }
                bau_uvhub_set(uvhub, &bau_desc->distribution);
-               remotes++;
-       }
-       if (remotes == 0) {
-               /*
-                * No off_hub flushing; return status for local hub.
-                * Return the caller's mask if all were local (the current
-                * cpu may be in that mask).
-                */
-               if (locals)
-                       return cpumask;
+               if (uvhub == bcp->uvhub)
+                       locals++;
                else
-                       return NULL;
+                       remotes++;
        }
-       stat = &per_cpu(ptcstats, cpu);
+       if ((locals + remotes) == 0)
+               return NULL;
        stat->s_requestor++;
-       stat->s_ntargcpu += remotes;
+       stat->s_ntargcpu += remotes + locals;
+       stat->s_ntargremotes += remotes;
+       stat->s_ntarglocals += locals;
        remotes = bau_uvhub_weight(&bau_desc->distribution);
-       stat->s_ntarguvhub += remotes;
-       if (remotes >= 16)
+
+       /* uvhub statistics */
+       hubs = bau_uvhub_weight(&bau_desc->distribution);
+       if (locals) {
+               stat->s_ntarglocaluvhub++;
+               stat->s_ntargremoteuvhub += (hubs - 1);
+       } else
+               stat->s_ntargremoteuvhub += hubs;
+       stat->s_ntarguvhub += hubs;
+       if (hubs >= 16)
                stat->s_ntarguvhub16++;
-       else if (remotes >= 8)
+       else if (hubs >= 8)
                stat->s_ntarguvhub8++;
-       else if (remotes >= 4)
+       else if (hubs >= 4)
                stat->s_ntarguvhub4++;
-       else if (remotes >= 2)
+       else if (hubs >= 2)
                stat->s_ntarguvhub2++;
        else
                stat->s_ntarguvhub1++;
@@ -779,10 +789,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
        bau_desc->payload.sending_cpu = cpu;
 
        /*
-        * uv_flush_send_and_wait returns null if all cpu's were messaged, or
-        * the adjusted flush_mask if any cpu's were not messaged.
+        * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
+        * or 1 if it gave up and the original cpumask should be returned.
         */
-       return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
+       if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+               return NULL;
+       else
+               return cpumask;
 }
 
 /*
@@ -810,7 +823,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 
        time_start = get_cycles();
        bcp = &per_cpu(bau_control, smp_processor_id());
-       stat = &per_cpu(ptcstats, smp_processor_id());
+       stat = bcp->statp;
        msgdesc.va_queue_first = bcp->va_queue_first;
        msgdesc.va_queue_last = bcp->va_queue_last;
        msg = bcp->bau_msg_head;
@@ -908,12 +921,12 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
 }
 
 static inline unsigned long long
-millisec_2_cycles(unsigned long millisec)
+microsec_2_cycles(unsigned long microsec)
 {
        unsigned long ns;
        unsigned long long cyc;
 
-       ns = millisec * 1000;
+       ns = microsec * 1000;
        cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
        return cyc;
 }
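
The renamed helper inverts the kernel's cycles-to-nanoseconds scaling:
per_cpu(cyc2ns) stores ns-per-cycle as a fixed-point value with
CYC2NS_SCALE_FACTOR fractional bits, so cycles = (ns << SCALE) / cyc2ns. A
worked example with an assumed 2 GHz cpu (the cyc2ns value is hypothetical):

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* fixed-point shift, as in asm/timer.h */

int main(void)
{
	unsigned long cyc2ns = 512;	/* 0.5 ns/cycle = 512/1024 fixed point */
	unsigned long microsec = 2;
	unsigned long ns = microsec * 1000;
	unsigned long long cyc =
		((unsigned long long)ns << CYC2NS_SCALE_FACTOR) / cyc2ns;

	printf("%lu us = %llu cycles\n", microsec, cyc);	/* 4000 */
	return 0;
}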
@@ -931,15 +944,19 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 
        if (!cpu) {
                seq_printf(file,
-                       "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
+                       "# cpu sent stime self locals remotes ncpus localhub ");
                seq_printf(file,
-                       "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
+                       "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
+               seq_printf(file,
+                       "numuvhubs4 numuvhubs2 numuvhubs1 dto ");
                seq_printf(file,
                        "retries rok resetp resett giveup sto bz throt ");
                seq_printf(file,
                        "sw_ack recv rtime all ");
                seq_printf(file,
-                       "one mult none retry canc nocan reset rcan\n");
+                       "one mult none retry canc nocan reset rcan ");
+               seq_printf(file,
+                       "disable enable\n");
        }
        if (cpu < num_possible_cpus() && cpu_online(cpu)) {
                stat = &per_cpu(ptcstats, cpu);
@@ -947,18 +964,23 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
                seq_printf(file,
                        "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
                           cpu, stat->s_requestor, cycles_2_us(stat->s_time),
-                          stat->s_ntarguvhub, stat->s_ntarguvhub16,
+                          stat->s_ntargself, stat->s_ntarglocals,
+                          stat->s_ntargremotes, stat->s_ntargcpu,
+                          stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
+                          stat->s_ntarguvhub, stat->s_ntarguvhub16);
+               seq_printf(file, "%ld %ld %ld %ld %ld ",
                           stat->s_ntarguvhub8, stat->s_ntarguvhub4,
                           stat->s_ntarguvhub2, stat->s_ntarguvhub1,
-                          stat->s_ntargcpu, stat->s_dtimeout);
+                          stat->s_dtimeout);
                seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
                           stat->s_retry_messages, stat->s_retriesok,
                           stat->s_resets_plug, stat->s_resets_timeout,
                           stat->s_giveup, stat->s_stimeout,
                           stat->s_busy, stat->s_throttles);
+
                /* destination side statistics */
                seq_printf(file,
-                          "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
+                          "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
                           uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
                                        UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
                           stat->d_requestee, cycles_2_us(stat->d_time),
@@ -966,15 +988,36 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
                           stat->d_nomsg, stat->d_retries, stat->d_canceled,
                           stat->d_nocanceled, stat->d_resets,
                           stat->d_rcanceled);
+               seq_printf(file, "%ld %ld\n",
+                       stat->s_bau_disabled, stat->s_bau_reenabled);
        }
 
        return 0;
 }
 
+/*
+ * Display the tunables thru debugfs
+ */
+static ssize_t tunables_read(struct file *file, char __user *userbuf,
+                                               size_t count, loff_t *ppos)
+{
+       char buf[300];
+       int ret;
+
+       ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
+               "max_bau_concurrent plugged_delay plugsb4reset",
+               "timeoutsb4reset ipi_reset_limit complete_threshold",
+               "congested_response_us congested_reps congested_period",
+               max_bau_concurrent, plugged_delay, plugsb4reset,
+               timeoutsb4reset, ipi_reset_limit, complete_threshold,
+               congested_response_us, congested_reps, congested_period);
+
+       return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+}
+
 /*
 * -1: reset the statistics
  *  0: display meaning of the statistics
- * >0: maximum concurrent active descriptors per uvhub (throttle)
  */
 static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
                                 size_t count, loff_t *data)
@@ -983,7 +1026,6 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
        long input_arg;
        char optstr[64];
        struct ptc_stats *stat;
-       struct bau_control *bcp;
 
        if (count == 0 || count > sizeof(optstr))
                return -EINVAL;
@@ -1059,29 +1101,158 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
                "reset:    number of ipi-style reset requests processed\n");
                printk(KERN_DEBUG
                "rcan:     number messages canceled by reset requests\n");
+               printk(KERN_DEBUG
+               "disable:  number times use of the BAU was disabled\n");
+               printk(KERN_DEBUG
+               "enable:   number times use of the BAU was re-enabled\n");
        } else if (input_arg == -1) {
                for_each_present_cpu(cpu) {
                        stat = &per_cpu(ptcstats, cpu);
                        memset(stat, 0, sizeof(struct ptc_stats));
                }
-       } else {
-               uv_bau_max_concurrent = input_arg;
-               bcp = &per_cpu(bau_control, smp_processor_id());
-               if (uv_bau_max_concurrent < 1 ||
-                   uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
-                       printk(KERN_DEBUG
-                               "Error: BAU max concurrent %d; %d is invalid\n",
-                               bcp->max_concurrent, uv_bau_max_concurrent);
-                       return -EINVAL;
-               }
-               printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
-                      uv_bau_max_concurrent);
-               for_each_present_cpu(cpu) {
-                       bcp = &per_cpu(bau_control, cpu);
-                       bcp->max_concurrent = uv_bau_max_concurrent;
+       }
+
+       return count;
+}
+
+static int local_atoi(const char *name)
+{
+       int val = 0;
+
+       for (;; name++) {
+               switch (*name) {
+               case '0' ... '9':
+                       val = 10*val+(*name-'0');
+                       break;
+               default:
+                       return val;
                }
        }
+}
+
+/*
+ * set the tunables
+ * 0 values reset them to defaults
+ */
+static ssize_t tunables_write(struct file *file, const char __user *user,
+                                size_t count, loff_t *data)
+{
+       int cpu;
+       int cnt = 0;
+       int val;
+       char *p;
+       char *q;
+       char instr[64];
+       struct bau_control *bcp;
+
+       if (count == 0 || count > sizeof(instr)-1)
+               return -EINVAL;
+       if (copy_from_user(instr, user, count))
+               return -EFAULT;
+
+       instr[count] = '\0';
+       /* count the fields */
+       p = instr + strspn(instr, WHITESPACE);
+       q = p;
+       for (; *p; p = q + strspn(q, WHITESPACE)) {
+               q = p + strcspn(p, WHITESPACE);
+               cnt++;
+               if (q == p)
+                       break;
+       }
+       if (cnt != 9) {
+               printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
+               return -EINVAL;
+       }
 
+       p = instr + strspn(instr, WHITESPACE);
+       q = p;
+       for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
+               q = p + strcspn(p, WHITESPACE);
+               val = local_atoi(p);
+               switch (cnt) {
+               case 0:
+                       if (val == 0) {
+                               max_bau_concurrent = MAX_BAU_CONCURRENT;
+                               max_bau_concurrent_constant =
+                                                       MAX_BAU_CONCURRENT;
+                               continue;
+                       }
+                       bcp = &per_cpu(bau_control, smp_processor_id());
+                       if (val < 1 || val > bcp->cpus_in_uvhub) {
+                               printk(KERN_DEBUG
+                               "Error: BAU max concurrent %d is invalid\n",
+                               val);
+                               return -EINVAL;
+                       }
+                       max_bau_concurrent = val;
+                       max_bau_concurrent_constant = val;
+                       continue;
+               case 1:
+                       if (val == 0)
+                               plugged_delay = PLUGGED_DELAY;
+                       else
+                               plugged_delay = val;
+                       continue;
+               case 2:
+                       if (val == 0)
+                               plugsb4reset = PLUGSB4RESET;
+                       else
+                               plugsb4reset = val;
+                       continue;
+               case 3:
+                       if (val == 0)
+                               timeoutsb4reset = TIMEOUTSB4RESET;
+                       else
+                               timeoutsb4reset = val;
+                       continue;
+               case 4:
+                       if (val == 0)
+                               ipi_reset_limit = IPI_RESET_LIMIT;
+                       else
+                               ipi_reset_limit = val;
+                       continue;
+               case 5:
+                       if (val == 0)
+                               complete_threshold = COMPLETE_THRESHOLD;
+                       else
+                               complete_threshold = val;
+                       continue;
+               case 6:
+                       if (val == 0)
+                               congested_response_us = CONGESTED_RESPONSE_US;
+                       else
+                               congested_response_us = val;
+                       continue;
+               case 7:
+                       if (val == 0)
+                               congested_reps = CONGESTED_REPS;
+                       else
+                               congested_reps = val;
+                       continue;
+               case 8:
+                       if (val == 0)
+                               congested_period = CONGESTED_PERIOD;
+                       else
+                               congested_period = val;
+                       continue;
+               }
+               if (q == p)
+                       break;
+       }
+       for_each_present_cpu(cpu) {
+               bcp = &per_cpu(bau_control, cpu);
+               bcp->max_bau_concurrent = max_bau_concurrent;
+               bcp->max_bau_concurrent_constant = max_bau_concurrent;
+               bcp->plugged_delay = plugged_delay;
+               bcp->plugsb4reset = plugsb4reset;
+               bcp->timeoutsb4reset = timeoutsb4reset;
+               bcp->ipi_reset_limit = ipi_reset_limit;
+               bcp->complete_threshold = complete_threshold;
+               bcp->congested_response_us = congested_response_us;
+               bcp->congested_reps = congested_reps;
+               bcp->congested_period = congested_period;
+       }
        return count;
 }
 
@@ -1097,6 +1268,11 @@ static int uv_ptc_proc_open(struct inode *inode, struct file *file)
        return seq_open(file, &uv_ptc_seq_ops);
 }
 
+static int tunables_open(struct inode *inode, struct file *file)
+{
+       return 0;
+}
+
 static const struct file_operations proc_uv_ptc_operations = {
        .open           = uv_ptc_proc_open,
        .read           = seq_read,
@@ -1105,6 +1281,12 @@ static const struct file_operations proc_uv_ptc_operations = {
        .release        = seq_release,
 };
 
+static const struct file_operations tunables_fops = {
+       .open           = tunables_open,
+       .read           = tunables_read,
+       .write          = tunables_write,
+};
+
 static int __init uv_ptc_init(void)
 {
        struct proc_dir_entry *proc_uv_ptc;
@@ -1119,6 +1301,20 @@ static int __init uv_ptc_init(void)
                       UV_PTC_BASENAME);
                return -EINVAL;
        }
+
+       tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
+       if (!tunables_dir) {
+               printk(KERN_ERR "unable to create debugfs directory %s\n",
+                      UV_BAU_TUNABLES_DIR);
+               return -EINVAL;
+       }
+       tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
+                       tunables_dir, NULL, &tunables_fops);
+       if (!tunables_file) {
+               printk(KERN_ERR "unable to create debugfs file %s\n",
+                      UV_BAU_TUNABLES_FILE);
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -1258,16 +1454,45 @@ static void __init uv_init_uvhub(int uvhub, int vector)
                                      ((apicid << 32) | vector));
 }
 
+/*
+ * We will set BAU_MISC_CONTROL with a timeout period.
+ * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
+ * So the destination timeout period has be be calculated from them.
+ */
+static int
+calculate_destination_timeout(void)
+{
+       unsigned long mmr_image;
+       int mult1;
+       int mult2;
+       int index;
+       int base;
+       int ret;
+       unsigned long ts_ns;
+
+       mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
+       mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+       index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
+       mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
+       mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
+       base = timeout_base_ns[index];
+       ts_ns = base * mult1 * mult2;
+       ret = ts_ns / 1000;
+       return ret;
+}
+
 /*
  * initialize the bau_control structure for each cpu
  */
 static void uv_init_per_cpu(int nuvhubs)
 {
-       int i, j, k;
+       int i;
        int cpu;
        int pnode;
        int uvhub;
        short socket = 0;
+       unsigned short socket_mask;
+       unsigned int uvhub_mask;
        struct bau_control *bcp;
        struct uvhub_desc *bdp;
        struct socket_desc *sdp;
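
calculate_destination_timeout() above combines three BIOS-programmed values:
mult1 from the low bits of the soft-ack timeout period (0x09 & 3 = 1), a base
period chosen by the urgency-7 index into timeout_base_ns[], and mult2 from
the transaction-timeout register. A worked example with assumed register
contents (the index and mult2 values are hypothetical):

#include <stdio.h>

static int timeout_base_ns[] = {
	20, 160, 1280, 10240, 81920, 655360, 5242880, 167772160
};

int main(void)
{
	int mult1 = 0x09 & 3;	/* UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & mult mask */
	int index = 6;		/* assumed urgency-7 selector */
	int mult2 = 2;		/* assumed transaction-timeout multiplier */
	long ts_ns = (long)timeout_base_ns[index] * mult1 * mult2;

	/* 5242880 * 1 * 2 = 10485760 ns, i.e. about 10.5 ms */
	printf("destination timeout: %ld us\n", ts_ns / 1000);
	return 0;
}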
@@ -1278,7 +1503,7 @@ static void uv_init_per_cpu(int nuvhubs)
                short cpu_number[16];
        };
        struct uvhub_desc {
-               short num_sockets;
+               unsigned short socket_mask;
                short num_cpus;
                short uvhub;
                short pnode;
@@ -1286,57 +1511,83 @@ static void uv_init_per_cpu(int nuvhubs)
        };
        struct uvhub_desc *uvhub_descs;
 
+       timeout_us = calculate_destination_timeout();
+
        uvhub_descs = (struct uvhub_desc *)
                kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
        memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                memset(bcp, 0, sizeof(struct bau_control));
-               spin_lock_init(&bcp->masks_lock);
-               bcp->max_concurrent = uv_bau_max_concurrent;
                pnode = uv_cpu_hub_info(cpu)->pnode;
                uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+               uvhub_mask |= (1 << uvhub);
                bdp = &uvhub_descs[uvhub];
                bdp->num_cpus++;
                bdp->uvhub = uvhub;
                bdp->pnode = pnode;
-               /* time interval to catch a hardware stay-busy bug */
-               bcp->timeout_interval = millisec_2_cycles(3);
-               /* kludge: assume uv_hub.h is constant */
-               socket = (cpu_physical_id(cpu)>>5)&1;
-               if (socket >= bdp->num_sockets)
-                       bdp->num_sockets = socket+1;
+               /* kludge: assume one node per socket, and that disabling
+                * a socket just leaves a gap in node numbers */
+               socket = (cpu_to_node(cpu) & 1);
+               bdp->socket_mask |= (1 << socket);
                sdp = &bdp->socket[socket];
                sdp->cpu_number[sdp->num_cpus] = cpu;
                sdp->num_cpus++;
        }
-       socket = 0;
-       for_each_possible_blade(uvhub) {
+       uvhub = 0;
+       while (uvhub_mask) {
+               if (!(uvhub_mask & 1))
+                       goto nexthub;
                bdp = &uvhub_descs[uvhub];
-               for (i = 0; i < bdp->num_sockets; i++) {
-                       sdp = &bdp->socket[i];
-                       for (j = 0; j < sdp->num_cpus; j++) {
-                               cpu = sdp->cpu_number[j];
+               socket_mask = bdp->socket_mask;
+               socket = 0;
+               while (socket_mask) {
+                       if (!(socket_mask & 1))
+                               goto nextsocket;
+                       sdp = &bdp->socket[socket];
+                       for (i = 0; i < sdp->num_cpus; i++) {
+                               cpu = sdp->cpu_number[i];
                                bcp = &per_cpu(bau_control, cpu);
                                bcp->cpu = cpu;
-                               if (j == 0) {
+                               if (i == 0) {
                                        smaster = bcp;
-                                       if (i == 0)
+                                       if (socket == 0)
                                                hmaster = bcp;
                                }
                                bcp->cpus_in_uvhub = bdp->num_cpus;
                                bcp->cpus_in_socket = sdp->num_cpus;
                                bcp->socket_master = smaster;
+                               bcp->uvhub = bdp->uvhub;
                                bcp->uvhub_master = hmaster;
-                               for (k = 0; k < DEST_Q_SIZE; k++)
-                                       bcp->socket_acknowledge_count[k] = 0;
-                               bcp->uvhub_cpu =
-                                 uv_cpu_hub_info(cpu)->blade_processor_id;
+                               bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
+                                               blade_processor_id;
                        }
+nextsocket:
                        socket++;
+                       socket_mask = (socket_mask >> 1);
                }
+nexthub:
+               uvhub++;
+               uvhub_mask = (uvhub_mask >> 1);
        }
        kfree(uvhub_descs);
+       for_each_present_cpu(cpu) {
+               bcp = &per_cpu(bau_control, cpu);
+               bcp->baudisabled = 0;
+               bcp->statp = &per_cpu(ptcstats, cpu);
+               /* time interval to catch a hardware stay-busy bug */
+               bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
+               bcp->max_bau_concurrent = max_bau_concurrent;
+               bcp->max_bau_concurrent_constant = max_bau_concurrent;
+               bcp->plugged_delay = plugged_delay;
+               bcp->plugsb4reset = plugsb4reset;
+               bcp->timeoutsb4reset = timeoutsb4reset;
+               bcp->ipi_reset_limit = ipi_reset_limit;
+               bcp->complete_threshold = complete_threshold;
+               bcp->congested_response_us = congested_response_us;
+               bcp->congested_reps = congested_reps;
+               bcp->congested_period = congested_period;
+       }
 }
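
Both passes above lean on one idiom: membership is recorded as bits in a mask while scanning the present CPUs, and the mask is then walked with a test-and-shift loop (the nextsocket/nexthub labels playing the role of continue) so empty hubs and disabled sockets are skipped without keeping per-slot counts. The idiom in isolation, with hypothetical names:

static void walk_mask(unsigned int mask, void (*visit)(int idx))
{
	int idx = 0;

	while (mask) {
		if (mask & 1)
			visit(idx);	/* only set bits are members */
		idx++;
		mask >>= 1;		/* loop ends when no bits remain */
	}
}

/* walk_mask(0x15, visit) calls visit(0), visit(2) and visit(4) */
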
 
 /*
@@ -1361,10 +1612,11 @@ static int __init uv_bau_init(void)
                zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
                                       GFP_KERNEL, cpu_to_node(cur_cpu));
 
-       uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
        uv_nshift = uv_hub_info->m_val;
        uv_mmask = (1UL << uv_hub_info->m_val) - 1;
        nuvhubs = uv_num_possible_blades();
+       spin_lock_init(&disable_lock);
+       congested_cycles = microsec_2_cycles(congested_response_us);
 
        uv_init_per_cpu(nuvhubs);
 
@@ -1383,15 +1635,19 @@ static int __init uv_bau_init(void)
        alloc_intr_gate(vector, uv_bau_message_intr1);
 
        for_each_possible_blade(uvhub) {
-               pnode = uv_blade_to_pnode(uvhub);
-               /* INIT the bau */
-               uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
-                                     ((unsigned long)1 << 63));
-               mmr = 1; /* should be 1 to broadcast to both sockets */
-               uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
+               if (uv_blade_nr_possible_cpus(uvhub)) {
+                       pnode = uv_blade_to_pnode(uvhub);
+                       /* INIT the bau */
+                       uv_write_global_mmr64(pnode,
+                                       UVH_LB_BAU_SB_ACTIVATION_CONTROL,
+                                       ((unsigned long)1 << 63));
+                       mmr = 1; /* should be 1 to broadcast to both sockets */
+                       uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
+                                               mmr);
+               }
        }
 
        return 0;
 }
 core_initcall(uv_bau_init);
-core_initcall(uv_ptc_init);
+fs_initcall(uv_ptc_init);
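
The initcall level change matters for ordering: initcalls run level by level (core is level 1, fs is level 5), and debugfs itself is set up from a core-level initcall, so creating debugfs entries from another core_initcall would depend on link order. Deferring uv_ptc_init to fs_initcall guarantees the debugfs core is ready first. The shape of the change, with a hypothetical function name:

#include <linux/init.h>

static int __init example_late_setup(void)
{
	/* runs at level 5, after every core_initcall (level 1),
	 * including the registration of debugfs itself */
	return 0;
}
fs_initcall(example_late_setup);
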
index 4a5979aa6883addce8921161b68bc9d8b7206ffc..2cda60a06e654ae6e66c5f8f4dadb1f85c5b7b1d 100644 (file)
        CFI_ADJUST_CFA_OFFSET -4
 .endm
 
-.macro BEGIN func reg
-$v = \reg
-
-ENTRY(atomic64_\func\()_386)
-       CFI_STARTPROC
-       LOCK $v
-
-.macro RETURN
-       UNLOCK $v
+#define BEGIN(op) \
+.macro endp; \
+       CFI_ENDPROC; \
+ENDPROC(atomic64_##op##_386); \
+.purgem endp; \
+.endm; \
+ENTRY(atomic64_##op##_386); \
+       CFI_STARTPROC; \
+       LOCK v;
+
+#define ENDP endp
+
+#define RET \
+       UNLOCK v; \
        ret
-.endm
-
-.macro END_
-       CFI_ENDPROC
-ENDPROC(atomic64_\func\()_386)
-.purgem RETURN
-.purgem END_
-.purgem END
-.endm
-
-.macro END
-RETURN
-END_
-.endm
-.endm
 
-BEGIN read %ecx
-       movl  ($v), %eax
-       movl 4($v), %edx
-END
-
-BEGIN set %esi
-       movl %ebx,  ($v)
-       movl %ecx, 4($v)
-END
-
-BEGIN xchg %esi
-       movl  ($v), %eax
-       movl 4($v), %edx
-       movl %ebx,  ($v)
-       movl %ecx, 4($v)
-END
-
-BEGIN add %ecx
-       addl %eax,  ($v)
-       adcl %edx, 4($v)
-END
-
-BEGIN add_return %ecx
-       addl  ($v), %eax
-       adcl 4($v), %edx
-       movl %eax,  ($v)
-       movl %edx, 4($v)
-END
-
-BEGIN sub %ecx
-       subl %eax,  ($v)
-       sbbl %edx, 4($v)
-END
-
-BEGIN sub_return %ecx
+#define RET_ENDP \
+       RET; \
+       ENDP
+
+#define v %ecx
+BEGIN(read)
+       movl  (v), %eax
+       movl 4(v), %edx
+RET_ENDP
+#undef v
+
+#define v %esi
+BEGIN(set)
+       movl %ebx,  (v)
+       movl %ecx, 4(v)
+RET_ENDP
+#undef v
+
+#define v  %esi
+BEGIN(xchg)
+       movl  (v), %eax
+       movl 4(v), %edx
+       movl %ebx,  (v)
+       movl %ecx, 4(v)
+RET_ENDP
+#undef v
+
+#define v %ecx
+BEGIN(add)
+       addl %eax,  (v)
+       adcl %edx, 4(v)
+RET_ENDP
+#undef v
+
+#define v %ecx
+BEGIN(add_return)
+       addl  (v), %eax
+       adcl 4(v), %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+RET_ENDP
+#undef v
+
+#define v %ecx
+BEGIN(sub)
+       subl %eax,  (v)
+       sbbl %edx, 4(v)
+RET_ENDP
+#undef v
+
+#define v %ecx
+BEGIN(sub_return)
        negl %edx
        negl %eax
        sbbl $0, %edx
-       addl  ($v), %eax
-       adcl 4($v), %edx
-       movl %eax,  ($v)
-       movl %edx, 4($v)
-END
-
-BEGIN inc %esi
-       addl $1,  ($v)
-       adcl $0, 4($v)
-END
-
-BEGIN inc_return %esi
-       movl  ($v), %eax
-       movl 4($v), %edx
+       addl  (v), %eax
+       adcl 4(v), %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+RET_ENDP
+#undef v
+
+#define v %esi
+BEGIN(inc)
+       addl $1,  (v)
+       adcl $0, 4(v)
+RET_ENDP
+#undef v
+
+#define v %esi
+BEGIN(inc_return)
+       movl  (v), %eax
+       movl 4(v), %edx
        addl $1, %eax
        adcl $0, %edx
-       movl %eax,  ($v)
-       movl %edx, 4($v)
-END
-
-BEGIN dec %esi
-       subl $1,  ($v)
-       sbbl $0, 4($v)
-END
-
-BEGIN dec_return %esi
-       movl  ($v), %eax
-       movl 4($v), %edx
+       movl %eax,  (v)
+       movl %edx, 4(v)
+RET_ENDP
+#undef v
+
+#define v %esi
+BEGIN(dec)
+       subl $1,  (v)
+       sbbl $0, 4(v)
+RET_ENDP
+#undef v
+
+#define v %esi
+BEGIN(dec_return)
+       movl  (v), %eax
+       movl 4(v), %edx
        subl $1, %eax
        sbbl $0, %edx
-       movl %eax,  ($v)
-       movl %edx, 4($v)
-END
+       movl %eax,  (v)
+       movl %edx, 4(v)
+RET_ENDP
+#undef v
 
-BEGIN add_unless %ecx
+#define v %ecx
+BEGIN(add_unless)
        addl %eax, %esi
        adcl %edx, %edi
-       addl  ($v), %eax
-       adcl 4($v), %edx
+       addl  (v), %eax
+       adcl 4(v), %edx
        cmpl %eax, %esi
        je 3f
 1:
-       movl %eax,  ($v)
-       movl %edx, 4($v)
+       movl %eax,  (v)
+       movl %edx, 4(v)
        movl $1, %eax
 2:
-RETURN
+       RET
 3:
        cmpl %edx, %edi
        jne 1b
        xorl %eax, %eax
        jmp 2b
-END_
+ENDP
+#undef v
 
-BEGIN inc_not_zero %esi
-       movl  ($v), %eax
-       movl 4($v), %edx
+#define v %esi
+BEGIN(inc_not_zero)
+       movl  (v), %eax
+       movl 4(v), %edx
        testl %eax, %eax
        je 3f
 1:
        addl $1, %eax
        adcl $0, %edx
-       movl %eax,  ($v)
-       movl %edx, 4($v)
+       movl %eax,  (v)
+       movl %edx, 4(v)
        movl $1, %eax
 2:
-RETURN
+       RET
 3:
        testl %edx, %edx
        jne 1b
        jmp 2b
-END_
+ENDP
+#undef v
 
-BEGIN dec_if_positive %esi
-       movl  ($v), %eax
-       movl 4($v), %edx
+#define v %esi
+BEGIN(dec_if_positive)
+       movl  (v), %eax
+       movl 4(v), %edx
        subl $1, %eax
        sbbl $0, %edx
        js 1f
-       movl %eax,  ($v)
-       movl %edx, 4($v)
+       movl %eax,  (v)
+       movl %edx, 4(v)
 1:
-END
+RET_ENDP
+#undef v
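
The rewrite trades nested gas macros for C preprocessor ones: BEGIN(op) opens ENTRY(atomic64_<op>_386) and defines a one-shot endp assembler macro carrying the matching ENDPROC, RET/RET_ENDP emit the unlock-and-return tail, and the per-operation "#define v <reg>" pins the operand register without gas argument substitution. From C, these routines sit behind the ordinary atomic64_t API on 386-class processors; a hedged usage sketch (kernel context assumed, header name per trees of this era):

#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic64_t demo_counter = ATOMIC64_INIT(0);

static void demo_atomic64(void)
{
	long long snap;

	atomic64_set(&demo_counter, 5);
	atomic64_add(3, &demo_counter);			/* now 8 */
	snap = atomic64_add_return(2, &demo_counter);	/* snap == 10 */

	/* add_unless refuses when the counter equals the given value */
	if (!atomic64_add_unless(&demo_counter, 1, 10))
		pr_debug("counter was 10, increment skipped\n");

	snap = atomic64_read(&demo_counter);		/* still 10 */
	(void)snap;
}
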
index f62777940dfbcc1ef534f9a90dbd96873ba453e1..4c4508e8a2043015c1cce3eb49a46c0c435e9297 100644 (file)
@@ -802,8 +802,10 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
        up_read(&mm->mmap_sem);
 
        /* Kernel mode? Handle exceptions or die: */
-       if (!(error_code & PF_USER))
+       if (!(error_code & PF_USER)) {
                no_context(regs, error_code, address);
+               return;
+       }
 
        /* User-space => ok to do another page fault: */
        if (is_prefetch(regs, error_code, address))
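
The do_sigbus fix closes a fall-through: when no_context() resolves a kernel-mode fault through the exception-fixup tables it returns, and the old code then continued into the signal-delivery path intended for user mode. The added return keeps the two paths disjoint. The control shape in isolation, with hypothetical helpers:

static void handle_kernel_fault(void) { /* fixup or oops */ }
static void send_user_sigbus(void)    { /* user mode only */ }

static void demo_do_sigbus(int from_user)
{
	if (!from_user) {
		handle_kernel_fault();
		return;		/* the fix: never fall through */
	}
	send_user_sigbus();
}
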
index 7715d3242ec81849dbd725cbbaaf52a7cd597380..d3530f6e8115350668b148377d61a85256883a0a 100644 (file)
@@ -1273,6 +1273,7 @@ static int __devinit c4_probe(struct pci_dev *dev,
        if (retval != 0) {
                printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
                       nr, param.port, param.irq, param.membase);
+               pci_disable_device(dev);
                return -ENODEV;
        }
        return 0;
index 5a3f830980185c45614fb4fbf59f51924032223c..a79eb5afb92dda279d7634b51bd20a0e047ace40 100644 (file)
@@ -210,6 +210,7 @@ static int __devinit t1pci_probe(struct pci_dev *dev,
        if (retval != 0) {
                printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
                       param.port, param.irq, param.membase);
+               pci_disable_device(dev);
                return -ENODEV;
        }
        return 0;
index d2dd61d65d519687fbb20493627b8d62045e37d2..af25e1f3efd4a784daa5aa78128e908e07cef648 100644 (file)
@@ -1094,6 +1094,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                pr_info("mISDN: do not have informations about adapter at %s\n",
                        pci_name(pdev));
                kfree(card);
+               pci_disable_device(pdev);
                return -EINVAL;
        } else
                pr_notice("mISDN: found adapter %s at %s\n",
@@ -1103,7 +1104,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, card);
        err = setup_instance(card);
        if (err) {
-               pci_disable_device(card->pdev);
+               pci_disable_device(pdev);
                kfree(card);
                pci_set_drvdata(pdev, NULL);
        } else if (ent->driver_data == INF_SCT_1) {
@@ -1114,6 +1115,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
                        if (!sc) {
                                release_card(card);
+                               pci_disable_device(pdev);
                                return -ENOMEM;
                        }
                        sc->irq = card->irq;
@@ -1121,6 +1123,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        sc->ci = card->ci + i;
                        err = setup_instance(sc);
                        if (err) {
+                               pci_disable_device(pdev);
                                kfree(sc);
                                release_card(card);
                                break;
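
All three ISDN fixes plug the same leak class: probe() enabled the PCI device, failed later, and returned without the balancing pci_disable_device(), leaving the device enabled with no driver bound. A compact sketch of the goto-unwind shape that keeps the disable call in one place; all demo_* names are hypothetical:

#include <linux/pci.h>
#include <linux/slab.h>

struct demo_card { int irq; };

static int demo_setup(struct demo_card *card) { return 0; }

static int demo_probe(struct pci_dev *pdev,
		      const struct pci_device_id *ent)
{
	struct demo_card *card;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card) {
		err = -ENOMEM;
		goto err_disable;
	}

	err = demo_setup(card);
	if (err)
		goto err_free;

	pci_set_drvdata(pdev, card);
	return 0;

err_free:
	kfree(card);
err_disable:
	pci_disable_device(pdev);	/* balanced on every failure */
	return err;
}
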
index 077ccf840edf24dc46107334c39c5758de248465..2111dbfea6feb8b53f56e73567e54fd4d11a818b 100644 (file)
 #include <net/caif/caif_spi.h>
 
 #ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS SPI_CMD_SZ
+#define SPI_DATA_POS 0
 static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 {
        return cfspi->rx_cpck_len;
 }
 #else
-#define SPI_DATA_POS 0
+#define SPI_DATA_POS SPI_CMD_SZ
 static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 {
        return 0;
index a527e37728cd9fe4566445b78bd2c1cf47bc0a88..eb799b36c86a391f90bb7f07074257084f4047ba 100644 (file)
@@ -5,7 +5,7 @@
 menuconfig PHYLIB
        tristate "PHY Device support and infrastructure"
        depends on !S390
-       depends on NET_ETHERNET
+       depends on NETDEVICES
        help
          Ethernet controllers are usually attached to PHY
          devices.  This option provides infrastructure for
index 5130db8f5c4ec4b2ced76a400071814c3e638c60..1bb16cb794331ba975eddcc5327f1fc13ef495c9 100644 (file)
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
 /**
  * phy_mii_ioctl - generic PHY MII ioctl interface
  * @phydev: the phy_device struct
- * @mii_data: MII ioctl data
+ * @ifr: &struct ifreq for socket ioctl's
  * @cmd: ioctl cmd to execute
  *
  * Note that this function is currently incompatible with the
index b9615bd745ea5d9175e6d6153d5c924084d550d2..bf6d87adda4fff7a5aa1d1e20728383d08e621f1 100644 (file)
@@ -473,48 +473,58 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
 static int
 qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
-       struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+       struct qlcnic_pci_info *pci_info;
        int i, ret = 0, err;
        u8 pfn;
 
-       if (!adapter->npars)
-               adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
-                               QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
-       if (!adapter->npars)
+       pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
                return -ENOMEM;
 
-       if (!adapter->eswitch)
-               adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+       adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+                               QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+       if (!adapter->npars) {
+               err = -ENOMEM;
+               goto err_pci_info;
+       }
+
+       adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
                                QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
        if (!adapter->eswitch) {
                err = -ENOMEM;
-               goto err_eswitch;
+               goto err_npars;
        }
 
        ret = qlcnic_get_pci_info(adapter, pci_info);
-       if (!ret) {
-               for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-                       pfn = pci_info[i].id;
-                       if (pfn > QLCNIC_MAX_PCI_FUNC)
-                               return QL_STATUS_INVALID_PARAM;
-                       adapter->npars[pfn].active = pci_info[i].active;
-                       adapter->npars[pfn].type = pci_info[i].type;
-                       adapter->npars[pfn].phy_port = pci_info[i].default_port;
-                       adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
-                       adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
-                       adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
-               }
-
-               for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
-                       adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+       if (ret)
+               goto err_eswitch;
 
-               return ret;
+       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+               pfn = pci_info[i].id;
+               if (pfn > QLCNIC_MAX_PCI_FUNC)
+                       return QL_STATUS_INVALID_PARAM;
+               adapter->npars[pfn].active = pci_info[i].active;
+               adapter->npars[pfn].type = pci_info[i].type;
+               adapter->npars[pfn].phy_port = pci_info[i].default_port;
+               adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
+               adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
+               adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
        }
 
+       for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+               adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+       kfree(pci_info);
+       return 0;
+
+err_eswitch:
        kfree(adapter->eswitch);
        adapter->eswitch = NULL;
-err_eswitch:
+err_npars:
        kfree(adapter->npars);
+       adapter->npars = NULL;
+err_pci_info:
+       kfree(pci_info);
 
        return ret;
 }
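
Two things change in this function: the QLCNIC_MAX_PCI_FUNC-sized pci_info array moves off the kernel stack onto the heap via kcalloc(), and the error labels are reordered so each one frees exactly what had been allocated when the failure hit. The stack-to-heap shape in isolation, with illustrative names and sizes:

#include <linux/slab.h>

#define DEMO_MAX_FUNC 8			/* illustrative bound */

struct demo_info { int id; };

static int demo_collect(struct demo_info *info) { return 0; }

static int demo_init_info(void)
{
	struct demo_info *info;
	int err;

	/* heap, not "struct demo_info info[DEMO_MAX_FUNC]" on stack */
	info = kcalloc(DEMO_MAX_FUNC, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	err = demo_collect(info);
	if (err)
		goto out_free;

	/* ... consume info[] ... */
	err = 0;
out_free:
	kfree(info);			/* freed on every exit path */
	return err;
}
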
@@ -3361,15 +3371,21 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
-       struct qlcnic_pci_info  pci_info[QLCNIC_MAX_PCI_FUNC];
+       struct qlcnic_pci_info *pci_info;
        int i, ret;
 
        if (size != sizeof(pci_cfg))
                return QL_STATUS_INVALID_PARAM;
 
+       pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+
        ret = qlcnic_get_pci_info(adapter, pci_info);
-       if (ret)
+       if (ret) {
+               kfree(pci_info);
                return ret;
+       }
 
        for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
                pci_cfg[i].pci_func = pci_info[i].id;
@@ -3380,8 +3396,8 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
                memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
        }
        memcpy(buf, &pci_cfg, size);
+       kfree(pci_info);
        return size;
-
 }
 static struct bin_attribute bin_attr_npar_config = {
        .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
index 7f62e2dea28f7423bd7539253e5698ab2094f049..ca7fc9df1ccf900533d56c35326b360316283af2 100644 (file)
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
 static void rx_complete (struct urb *urb);
 
-static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 {
        struct sk_buff          *skb;
        struct skb_data         *entry;
@@ -327,7 +327,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
                usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
                usb_free_urb (urb);
-               return;
+               return -ENOMEM;
        }
        skb_reserve (skb, NET_IP_ALIGN);
 
@@ -357,6 +357,9 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                        netif_dbg(dev, ifdown, dev->net, "device gone\n");
                        netif_device_detach (dev->net);
                        break;
+               case -EHOSTUNREACH:
+                       retval = -ENOLINK;
+                       break;
                default:
                        netif_dbg(dev, rx_err, dev->net,
                                  "rx submit, %d\n", retval);
@@ -374,6 +377,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                dev_kfree_skb_any (skb);
                usb_free_urb (urb);
        }
+       return retval;
 }
 
 
@@ -912,6 +916,7 @@ fail_halt:
        /* tasklet could resubmit itself forever if memory is tight */
        if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
                struct urb      *urb = NULL;
+               int resched = 1;
 
                if (netif_running (dev->net))
                        urb = usb_alloc_urb (0, GFP_KERNEL);
@@ -922,10 +927,12 @@ fail_halt:
                        status = usb_autopm_get_interface(dev->intf);
                        if (status < 0)
                                goto fail_lowmem;
-                       rx_submit (dev, urb, GFP_KERNEL);
+                       if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+                               resched = 0;
                        usb_autopm_put_interface(dev->intf);
 fail_lowmem:
-                       tasklet_schedule (&dev->bh);
+                       if (resched)
+                               tasklet_schedule (&dev->bh);
                }
        }
 
@@ -1175,8 +1182,11 @@ static void usbnet_bh (unsigned long param)
                        // don't refill the queue all at once
                        for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
                                urb = usb_alloc_urb (0, GFP_ATOMIC);
-                               if (urb != NULL)
-                                       rx_submit (dev, urb, GFP_ATOMIC);
+                               if (urb != NULL) {
+                                       if (rx_submit (dev, urb, GFP_ATOMIC) ==
+                                           -ENOLINK)
+                                               return;
+                               }
                        }
                        if (temp != dev->rxq.qlen)
                                netif_dbg(dev, link, dev->net,
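
rx_submit() now reports its outcome so both callers can react: the keventd worker skips rescheduling the bottom half when the submit says the device is gone (-ENOLINK, mapped from the USB core's -EHOSTUNREACH), and usbnet_bh() stops refilling the receive queue for the same reason, breaking the resubmit-forever loop on a detached device. The control shape, reduced to essentials with hypothetical names:

#include <linux/errno.h>

struct demo_dev { int gone; };

static int demo_usb_submit(struct demo_dev *dev)
{
	return dev->gone ? -EHOSTUNREACH : 0;
}

static int demo_rx_submit(struct demo_dev *dev)
{
	int status = demo_usb_submit(dev);

	if (status == -EHOSTUNREACH)
		return -ENOLINK;	/* device is gone, tell caller */
	return status;
}

static void demo_refill(struct demo_dev *dev)
{
	int i;

	for (i = 0; i < 10; i++)
		if (demo_rx_submit(dev) == -ENOLINK)
			return;		/* no point rescheduling */
}
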
index ad7719fe6d0a2ced65c2db0a27faeeab6550486a..e050bd65e0378cbdd6643d893c01a4462a2dea2f 100644 (file)
@@ -885,20 +885,21 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
  *      Receive a frame through the DMA
  */
 static inline void
-fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
-          unsigned char *mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
+          dma_addr_t mem, int len)
 {
        /*
         * This routine will setup the DMA and start it
         */
 
-       dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
+       dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
+           (unsigned long) skb, (unsigned long) mem, len);
        if (card->dmarx_in_progress) {
                dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
        }
 
-       outl((unsigned long) skb, card->pci_conf + DMAPADR0);   /* Copy to here */
-       outl((unsigned long) mem, card->pci_conf + DMALADR0);   /* from here */
+       outl(skb, card->pci_conf + DMAPADR0);   /* Copy to here */
+       outl(mem, card->pci_conf + DMALADR0);   /* from here */
        outl(len, card->pci_conf + DMASIZ0);    /* for this length */
        outl(0x00000000c, card->pci_conf + DMADPR0);    /* In this direction */
 
@@ -1309,8 +1310,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
                card->dma_port_rx = port;
                card->dma_len_rx = len;
                card->dma_rxpos = rxp;
-               fst_rx_dma(card, (char *) card->rx_dma_handle_card,
-                          (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+               fst_rx_dma(card, card->rx_dma_handle_card,
+                          BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
        }
        if (rxp != port->rxpos) {
                dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
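
The farsync change corrects types rather than logic: the DMAPADR0/DMALADR0 registers take bus addresses, so fst_rx_dma() now receives dma_addr_t values directly instead of laundering them through unsigned char pointers (which had also broken the %p debug format). The general rule is that an address programmed into a device must come from the DMA API, never from a cast virtual pointer. A hedged sketch, with a hypothetical register-write helper and unmapping on completion omitted:

#include <linux/dma-mapping.h>

static void demo_write_reg(dma_addr_t addr, size_t len) { }

static void demo_start_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	/* map the CPU buffer; 'bus' is what the hardware understands */
	bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, bus))
		return;

	/* hand the device the dma_addr_t, not a cast pointer */
	demo_write_reg(bus, len);
}
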
index 8848333bc3a9545e8ed6a9dd3d8905bfeee6e33f..fec026212326dce857220196dd312c3b2d5b6c69 100644 (file)
@@ -260,7 +260,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
        .shadow_ram_support = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
index a07310fefcf2d0e56c45d45fabce4251d26e12fc..6950a783913b83561839f20a08657857ee242a86 100644 (file)
@@ -769,22 +769,6 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
                rts_retry_limit = data_retry_limit;
        tx_cmd->rts_retry_limit = rts_retry_limit;
 
-       if (ieee80211_is_mgmt(fc)) {
-               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-               case cpu_to_le16(IEEE80211_STYPE_AUTH):
-               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-                       if (tx_flags & TX_CMD_FLG_RTS_MSK) {
-                               tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-                               tx_flags |= TX_CMD_FLG_CTS_MSK;
-                       }
-                       break;
-               default:
-                       break;
-               }
-       }
-
        tx_cmd->rate = rate;
        tx_cmd->tx_flags = tx_flags;
 
@@ -2717,7 +2701,7 @@ static struct iwl_lib_ops iwl3945_lib = {
 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
        .get_hcmd_size = iwl3945_get_hcmd_size,
        .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
-       .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+       .tx_cmd_protection = iwlcore_tx_cmd_protection,
        .request_scan = iwl3945_request_scan,
 };
 
index d6531ad3906a5d2a407ff2c5c106a6cf335ead10..d6da356608fa46efe34a317bdda60c6680b65d7e 100644 (file)
@@ -2223,7 +2223,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
        .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
        .chain_noise_reset = iwl4965_chain_noise_reset,
        .gain_computation = iwl4965_gain_computation,
-       .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+       .tx_cmd_protection = iwlcore_tx_cmd_protection,
        .calc_rssi = iwl4965_calc_rssi,
        .request_scan = iwlagn_request_scan,
 };
index 8093ce2804fb31d3fb5e00cbc7f49da19d9543b6..aacf3770f075b4abd1de4a474f79cc60eb42c9cc 100644 (file)
@@ -506,7 +506,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
@@ -537,7 +537,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
@@ -597,7 +597,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
@@ -628,7 +628,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
@@ -659,7 +659,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
index 58270529a0e4efc4ea3198ce9aa27fc45d1c9707..af4fd50f3405db334ce2e44e1ce648c7c510ed7c 100644 (file)
@@ -381,7 +381,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
@@ -489,7 +489,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
@@ -563,7 +563,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
@@ -637,7 +637,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
@@ -714,7 +714,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
@@ -821,7 +821,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
@@ -859,7 +859,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
@@ -933,7 +933,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
        .led_compensation = 51,
-       .use_rts_for_ht = true, /* use rts/cts protection */
+       .use_rts_for_aggregation = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .supports_idle = true,
        .adv_thermal_throttle = true,
index a7216dda97868dd576c02ddee55484e9a3121ac4..75b901b3eb1ebff8cb5a7adbbd380db5cd8bc88f 100644 (file)
@@ -211,10 +211,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
        }
 }
 
-static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-                       __le32 *tx_flags)
+static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
+                                    struct ieee80211_tx_info *info,
+                                    __le16 fc, __le32 *tx_flags)
 {
-       *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
+           info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+               *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+               return;
+       }
+
+       if (priv->cfg->use_rts_for_aggregation &&
+           info->flags & IEEE80211_TX_CTL_AMPDU) {
+               *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+               return;
+       }
 }
 
 /* Calc max signal level (dBm) among 3 possible receivers */
@@ -268,7 +279,7 @@ struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
        .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
        .gain_computation = iwlagn_gain_computation,
        .chain_noise_reset = iwlagn_chain_noise_reset,
-       .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
+       .tx_cmd_protection = iwlagn_tx_cmd_protection,
        .calc_rssi = iwlagn_calc_rssi,
        .request_scan = iwlagn_request_scan,
 };
index d04502d54df3c48b0d869c5169bf79ee397ecf1e..69155aa448fb80f8c042b212567679b905e20513 100644 (file)
@@ -379,10 +379,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
-       priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
-       if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
-               tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+       priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
 
        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
@@ -456,21 +453,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;
 
-       /* Set up RTS and CTS flags for certain packets */
-       switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-       case cpu_to_le16(IEEE80211_STYPE_AUTH):
-       case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-       case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-       case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-               if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
-                       tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-                       tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
-               }
-               break;
-       default:
-               break;
-       }
-
        /* Set up antennas */
        priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
                                              priv->hw_params.valid_tx_ant);
index 35337b1e7caceeac3b14ee0c4a2ea3a660010989..c1882fd8345d43dd630718c1f9475533511e5f7c 100644 (file)
@@ -202,13 +202,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 
        priv->start_calib = 0;
        if (new_assoc) {
-               /*
-                * allow CTS-to-self if possible for new association.
-                * this is relevant only for 5000 series and up,
-                * but will not damage 4965
-                */
-               priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
-
                /* Apply the new configuration
                 * RXON assoc doesn't clear the station table in uCode,
                 */
@@ -1618,45 +1611,9 @@ static ssize_t store_tx_power(struct device *d,
 
 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
 
-static ssize_t show_rts_ht_protection(struct device *d,
-                            struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       return sprintf(buf, "%s\n",
-               priv->cfg->use_rts_for_ht ? "RTS/CTS" : "CTS-to-self");
-}
-
-static ssize_t store_rts_ht_protection(struct device *d,
-                             struct device_attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       unsigned long val;
-       int ret;
-
-       ret = strict_strtoul(buf, 10, &val);
-       if (ret)
-               IWL_INFO(priv, "Input is not in decimal form.\n");
-       else {
-               if (!iwl_is_associated(priv))
-                       priv->cfg->use_rts_for_ht = val ? true : false;
-               else
-                       IWL_ERR(priv, "Sta associated with AP - "
-                               "Change protection mechanism is not allowed\n");
-               ret = count;
-       }
-       return ret;
-}
-
-static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO,
-                       show_rts_ht_protection, store_rts_ht_protection);
-
-
 static struct attribute *iwl_sysfs_entries[] = {
        &dev_attr_temperature.attr,
        &dev_attr_tx_power.attr,
-       &dev_attr_rts_ht_protection.attr,
 #ifdef CONFIG_IWLWIFI_DEBUG
        &dev_attr_debug_level.attr,
 #endif
@@ -3464,25 +3421,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        return ret;
 }
 
-/*
- * switch to RTS/CTS for TX
- */
-static void iwl_enable_rts_cts(struct iwl_priv *priv)
-{
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
-       if (!test_bit(STATUS_SCANNING, &priv->status)) {
-               IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n");
-               iwlcore_commit_rxon(priv);
-       } else {
-               /* scanning, defer the request until scan completed */
-               IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n");
-       }
-}
-
 static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                enum ieee80211_ampdu_mlme_action action,
@@ -3529,14 +3467,33 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
                }
                if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                        ret = 0;
+               if (priv->cfg->use_rts_for_aggregation) {
+                       struct iwl_station_priv *sta_priv =
+                               (void *) sta->drv_priv;
+                       /*
+                        * switch off RTS/CTS if it was previously enabled
+                        */
+
+                       sta_priv->lq_sta.lq.general_params.flags &=
+                               ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+                       iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
+                               CMD_ASYNC, false);
+               }
                break;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
-               if (priv->cfg->use_rts_for_ht) {
+               if (priv->cfg->use_rts_for_aggregation) {
+                       struct iwl_station_priv *sta_priv =
+                               (void *) sta->drv_priv;
+
                        /*
                         * switch to RTS/CTS if it is the preferred protection
                         * method for HT traffic
                         */
-                       iwl_enable_rts_cts(priv);
+
+                       sta_priv->lq_sta.lq.general_params.flags |=
+                               LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+                       iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
+                               CMD_ASYNC, false);
                }
                ret = 0;
                break;
index 8ccb6d205b6d6219c5e23c38806113f8514ef3ac..2c03c6e20a72d73ded70f8c96d37bfce6aa8e263 100644 (file)
@@ -401,21 +401,38 @@ void iwlcore_free_geos(struct iwl_priv *priv)
 EXPORT_SYMBOL(iwlcore_free_geos);
 
 /*
- *  iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
+ *  iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
  *  function.
  */
-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-                               __le32 *tx_flags)
+void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
+                              struct ieee80211_tx_info *info,
+                              __le16 fc, __le32 *tx_flags)
 {
        if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                *tx_flags |= TX_CMD_FLG_RTS_MSK;
                *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+               if (!ieee80211_is_mgmt(fc))
+                       return;
+
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
+                       break;
+               }
        } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
                *tx_flags |= TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
        }
 }
-EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
+EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
+
 
 static bool is_single_rx_stream(struct iwl_priv *priv)
 {
@@ -1869,6 +1886,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
                        priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
                else
                        priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+               if (bss_conf->use_cts_prot)
+                       priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
+               else
+                       priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
        }
 
        if (changes & BSS_CHANGED_BASIC_RATES) {
index e9d23f2f869d8c72a81c00b4890e215518e058ff..4a71dfb10a15d0f6ebf4007944d011d249333902 100644 (file)
@@ -104,8 +104,9 @@ struct iwl_hcmd_utils_ops {
                        u32 min_average_noise,
                        u8 default_chain);
        void (*chain_noise_reset)(struct iwl_priv *priv);
-       void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
-                       __le32 *tx_flags);
+       void (*tx_cmd_protection)(struct iwl_priv *priv,
+                                 struct ieee80211_tx_info *info,
+                                 __le16 fc, __le32 *tx_flags);
        int  (*calc_rssi)(struct iwl_priv *priv,
                          struct iwl_rx_phy_res *rx_resp);
        void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
@@ -249,7 +250,7 @@ struct iwl_mod_params {
  * @led_compensation: compensate on the led on/off time per HW according
  *     to the deviation to achieve the desired led frequency.
  *     The detail algorithm is described in iwl-led.c
- * @use_rts_for_ht: use rts/cts protection for HT traffic
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
  * @chain_noise_num_beacons: number of beacons used to compute chain noise
  * @adv_thermal_throttle: support advance thermal throttle
  * @support_ct_kill_exit: support ct kill exit condition
@@ -318,7 +319,7 @@ struct iwl_cfg {
        const bool ht_greenfield_support;
        u16 led_compensation;
        const bool broken_powersave;
-       bool use_rts_for_ht;
+       bool use_rts_for_aggregation;
        int chain_noise_num_beacons;
        const bool supports_idle;
        bool adv_thermal_throttle;
@@ -390,8 +391,9 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
 void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
 int iwl_alloc_txq_mem(struct iwl_priv *priv);
 void iwl_free_txq_mem(struct iwl_priv *priv);
-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-                               __le32 *tx_flags);
+void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
+                              struct ieee80211_tx_info *info,
+                              __le16 fc, __le32 *tx_flags);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_alloc_traffic_mem(struct iwl_priv *priv);
 void iwl_free_traffic_mem(struct iwl_priv *priv);
index d24eb47d370548b73036411c3532443e2d149c28..70c4b8fba0ee89093c56c056e7152b927da5a37c 100644 (file)
@@ -435,10 +435,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
-       priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
-       if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
-               tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+       priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
 
        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
index 2372abb29c2e1e1a56050cf4a1a07001ab6fbdfd..3e82f162720972d8c6bd732fa437c7fc7046bde6 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/ieee80211.h>
 #include <net/cfg80211.h>
 #include <asm/unaligned.h>
index 71a101fb2e4ecad6639d9e878fcdac3e68224f34..822f8dc26e9c051d3b9ee34e79ef715b0f204067 100644 (file)
@@ -43,8 +43,6 @@ static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
        { PCI_DEVICE(0x1260, 0x3886) },
        /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
        { PCI_DEVICE(0x1260, 0xffff) },
-       /* Standard Microsystems Corp SMC2802W Wireless PCI */
-       { PCI_DEVICE(0x10b8, 0x2802) },
        { },
 };
 
index a75ed3083a6ae266d66b9630140b42db8d07266e..8e4153d740f3bf00d4a465bf5a1526ee66ad518e 100644 (file)
@@ -386,7 +386,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
         struct chbk *p_ch;
 
        CLAW_DBF_TEXT(4, trace, "claw_tx");
-        p_ch=&privptr->channel[WRITE];
+       p_ch = &privptr->channel[WRITE_CHANNEL];
         spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
         rc=claw_hw_tx( skb, dev, 1 );
         spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
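
The bulk of the claw.c changes are one mechanical rename: the driver indexed its channel pair with READ and WRITE, identifiers that collide with the generic READ/WRITE request-direction macros from the core headers, so the private names become READ_CHANNEL and WRITE_CHANNEL. A sketch of the disambiguation; the real definitions live in the s390 claw/ctcm headers:

#include <linux/fs.h>	/* supplies the generic READ (0) / WRITE (1)
			   on kernels of this vintage */

/* driver-private channel indices, renamed to avoid the clash */
#define READ_CHANNEL	0
#define WRITE_CHANNEL	1

struct demo_priv {
	int channel[2];
};

static int demo_pick_write(struct demo_priv *p)
{
	return p->channel[WRITE_CHANNEL];	/* unambiguous */
}
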
@@ -407,7 +407,7 @@ static struct sk_buff *
 claw_pack_skb(struct claw_privbk *privptr)
 {
        struct sk_buff *new_skb,*held_skb;
-       struct chbk *p_ch = &privptr->channel[WRITE];
+       struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
        struct claw_env  *p_env = privptr->p_env;
        int     pkt_cnt,pk_ind,so_far;
 
@@ -515,15 +515,15 @@ claw_open(struct net_device *dev)
                privptr->p_env->write_size=CLAW_FRAME_SIZE;
        }
         claw_set_busy(dev);
-       tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
-               (unsigned long) &privptr->channel[READ]);
+       tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+               (unsigned long) &privptr->channel[READ_CHANNEL]);
         for ( i = 0; i < 2;  i++) {
                CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
                 init_waitqueue_head(&privptr->channel[i].wait);
                /* skb_queue_head_init(&p_ch->io_queue); */
-               if (i == WRITE)
+               if (i == WRITE_CHANNEL)
                        skb_queue_head_init(
-                               &privptr->channel[WRITE].collect_queue);
+                               &privptr->channel[WRITE_CHANNEL].collect_queue);
                 privptr->channel[i].flag_a = 0;
                 privptr->channel[i].IO_active = 0;
                 privptr->channel[i].flag  &= ~CLAW_TIMER;
@@ -551,12 +551,12 @@ claw_open(struct net_device *dev)
                 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
                         del_timer(&timer);
         }
-        if ((((privptr->channel[READ].last_dstat |
-               privptr->channel[WRITE].last_dstat) &
+       if ((((privptr->channel[READ_CHANNEL].last_dstat |
+               privptr->channel[WRITE_CHANNEL].last_dstat) &
            ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
-           (((privptr->channel[READ].flag |
-               privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
-               dev_info(&privptr->channel[READ].cdev->dev,
+          (((privptr->channel[READ_CHANNEL].flag |
+               privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+               dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
                        "%s: remote side is not ready\n", dev->name);
                CLAW_DBF_TEXT(2, trace, "notrdy");
 
@@ -608,8 +608,8 @@ claw_open(struct net_device *dev)
                         }
                 }
                privptr->buffs_alloc = 0;
-               privptr->channel[READ].flag= 0x00;
-               privptr->channel[WRITE].flag = 0x00;
+               privptr->channel[READ_CHANNEL].flag = 0x00;
+               privptr->channel[WRITE_CHANNEL].flag = 0x00;
                 privptr->p_buff_ccw=NULL;
                 privptr->p_buff_read=NULL;
                 privptr->p_buff_write=NULL;
@@ -652,10 +652,10 @@ claw_irq_handler(struct ccw_device *cdev,
         }
 
        /* Try to extract channel from driver data. */
-       if (privptr->channel[READ].cdev == cdev)
-               p_ch = &privptr->channel[READ];
-       else if (privptr->channel[WRITE].cdev == cdev)
-               p_ch = &privptr->channel[WRITE];
+       if (privptr->channel[READ_CHANNEL].cdev == cdev)
+               p_ch = &privptr->channel[READ_CHANNEL];
+       else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+               p_ch = &privptr->channel[WRITE_CHANNEL];
        else {
                dev_warn(&cdev->dev, "The device is not a CLAW device\n");
                CLAW_DBF_TEXT(2, trace, "badchan");
@@ -813,7 +813,7 @@ claw_irq_handler(struct ccw_device *cdev,
                        claw_clearbit_busy(TB_TX, dev);
                        claw_clear_busy(dev);
                }
-               p_ch_r = (struct chbk *)&privptr->channel[READ];
+               p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
                if (test_and_set_bit(CLAW_BH_ACTIVE,
                        (void *)&p_ch_r->flag_a) == 0)
                        tasklet_schedule(&p_ch_r->tasklet);
@@ -878,13 +878,13 @@ claw_release(struct net_device *dev)
         for ( i = 1; i >=0 ;  i--) {
                 spin_lock_irqsave(
                        get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
-             /*   del_timer(&privptr->channel[READ].timer);  */
+            /*   del_timer(&privptr->channel[READ_CHANNEL].timer);  */
                privptr->channel[i].claw_state = CLAW_STOP;
                 privptr->channel[i].IO_active = 0;
                 parm = (unsigned long) &privptr->channel[i];
-               if (i == WRITE)
+               if (i == WRITE_CHANNEL)
                        claw_purge_skb_queue(
-                               &privptr->channel[WRITE].collect_queue);
+                               &privptr->channel[WRITE_CHANNEL].collect_queue);
                 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
                if (privptr->system_validate_comp==0x00)  /* never opened? */
                    init_waitqueue_head(&privptr->channel[i].wait);
@@ -971,16 +971,16 @@ claw_release(struct net_device *dev)
         privptr->mtc_skipping = 1;
         privptr->mtc_offset=0;
 
-        if (((privptr->channel[READ].last_dstat |
-               privptr->channel[WRITE].last_dstat) &
+       if (((privptr->channel[READ_CHANNEL].last_dstat |
+               privptr->channel[WRITE_CHANNEL].last_dstat) &
                ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
-               dev_warn(&privptr->channel[READ].cdev->dev,
+               dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
                        "Deactivating %s completed with incorrect"
                        " subchannel status "
                        "(read %02x, write %02x)\n",
                 dev->name,
-               privptr->channel[READ].last_dstat,
-               privptr->channel[WRITE].last_dstat);
+               privptr->channel[READ_CHANNEL].last_dstat,
+               privptr->channel[WRITE_CHANNEL].last_dstat);
                 CLAW_DBF_TEXT(2, trace, "badclose");
         }
        CLAW_DBF_TEXT(4, trace, "rlsexit");
@@ -1324,7 +1324,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
 
        CLAW_DBF_TEXT(4, trace, "hw_tx");
        privptr = (struct claw_privbk *)(dev->ml_priv);
-        p_ch=(struct chbk *)&privptr->channel[WRITE];
+       p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
        p_env =privptr->p_env;
        claw_free_wrt_buf(dev); /* Clean up free chain if posible */
         /*  scan the write queue to free any completed write packets   */
@@ -1357,7 +1357,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
                                 claw_strt_out_IO(dev );
                                 claw_free_wrt_buf( dev );
                                 if (privptr->write_free_count==0) {
-                                       ch = &privptr->channel[WRITE];
+                                       ch = &privptr->channel[WRITE_CHANNEL];
                                        atomic_inc(&skb->users);
                                        skb_queue_tail(&ch->collect_queue, skb);
                                        goto Done;
@@ -1369,7 +1369,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
                 }
                 /*  tx lock  */
                 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
-                       ch = &privptr->channel[WRITE];
+                       ch = &privptr->channel[WRITE_CHANNEL];
                        atomic_inc(&skb->users);
                        skb_queue_tail(&ch->collect_queue, skb);
                         claw_strt_out_IO(dev );
@@ -1385,7 +1385,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
             privptr->p_write_free_chain == NULL ) {
 
                 claw_setbit_busy(TB_NOBUFFER,dev);
-               ch = &privptr->channel[WRITE];
+               ch = &privptr->channel[WRITE_CHANNEL];
                atomic_inc(&skb->users);
                skb_queue_tail(&ch->collect_queue, skb);
                CLAW_DBF_TEXT(2, trace, "clawbusy");
@@ -1397,7 +1397,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
         while (len_of_data > 0) {
                 p_this_ccw=privptr->p_write_free_chain;  /* get a block */
                if (p_this_ccw == NULL) { /* lost the race */
-                       ch = &privptr->channel[WRITE];
+                       ch = &privptr->channel[WRITE_CHANNEL];
                        atomic_inc(&skb->users);
                        skb_queue_tail(&ch->collect_queue, skb);
                        goto Done2;
@@ -2067,7 +2067,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
                        *catch up to each other */
        privptr = dev->ml_priv;
         p_env=privptr->p_env;
-       tdev = &privptr->channel[READ].cdev->dev;
+       tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
        memcpy( &temp_host_name, p_env->host_name, 8);
         memcpy( &temp_ws_name, p_env->adapter_name , 8);
        dev_info(tdev, "%s: CLAW device %.8s: "
@@ -2245,7 +2245,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
                        dev->name, temp_ws_name,
                        p_ctlbk->linkid);
                        privptr->active_link_ID = p_ctlbk->linkid;
-                       p_ch = &privptr->channel[WRITE];
+                       p_ch = &privptr->channel[WRITE_CHANNEL];
                        wake_up(&p_ch->wait);  /* wake up claw_open ( WRITE) */
                break;
        case CONNECTION_RESPONSE:
@@ -2296,7 +2296,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
                                "%s: Confirmed Now packing\n", dev->name);
                                p_env->packing = DO_PACKED;
                        }
-                       p_ch = &privptr->channel[WRITE];
+                       p_ch = &privptr->channel[WRITE_CHANNEL];
                        wake_up(&p_ch->wait);
                } else {
                        dev_warn(tdev, "Activating %s failed because of"
@@ -2556,7 +2556,7 @@ unpack_read(struct net_device *dev )
        p_packd=NULL;
        privptr = dev->ml_priv;
 
-       p_dev = &privptr->channel[READ].cdev->dev;
+       p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
        p_env = privptr->p_env;
         p_this_ccw=privptr->p_read_active_first;
        while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
@@ -2728,7 +2728,7 @@ claw_strt_read (struct net_device *dev, int lock )
         struct ccwbk*p_ccwbk;
         struct chbk *p_ch;
         struct clawh *p_clawh;
-        p_ch=&privptr->channel[READ];
+       p_ch = &privptr->channel[READ_CHANNEL];
 
        CLAW_DBF_TEXT(4, trace, "StRdNter");
         p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
@@ -2782,7 +2782,7 @@ claw_strt_out_IO( struct net_device *dev )
                return;
        }
        privptr = (struct claw_privbk *)dev->ml_priv;
-        p_ch=&privptr->channel[WRITE];
+       p_ch = &privptr->channel[WRITE_CHANNEL];
 
        CLAW_DBF_TEXT(4, trace, "strt_io");
         p_first_ccw=privptr->p_write_active_first;
@@ -2875,7 +2875,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
        if (dev->flags & IFF_RUNNING)
                claw_release(dev);
        if (privptr) {
-               privptr->channel[READ].ndev = NULL;  /* say it's free */
+               privptr->channel[READ_CHANNEL].ndev = NULL;  /* say it's free */
        }
        dev->ml_priv = NULL;
 #ifdef MODULE
@@ -2960,18 +2960,18 @@ claw_new_device(struct ccwgroup_device *cgdev)
        struct ccw_dev_id dev_id;
 
        dev_info(&cgdev->dev, "add for %s\n",
-                dev_name(&cgdev->cdev[READ]->dev));
+                dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
        CLAW_DBF_TEXT(2, setup, "new_dev");
        privptr = dev_get_drvdata(&cgdev->dev);
-       dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
-       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
        if (!privptr)
                return -ENODEV;
        p_env = privptr->p_env;
-       ccw_device_get_id(cgdev->cdev[READ], &dev_id);
-       p_env->devno[READ] = dev_id.devno;
-       ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
-       p_env->devno[WRITE] = dev_id.devno;
+       ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+       p_env->devno[READ_CHANNEL] = dev_id.devno;
+       ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+       p_env->devno[WRITE_CHANNEL] = dev_id.devno;
        ret = add_channel(cgdev->cdev[0],0,privptr);
        if (ret == 0)
                ret = add_channel(cgdev->cdev[1],1,privptr);
@@ -2980,14 +2980,14 @@ claw_new_device(struct ccwgroup_device *cgdev)
                        " failed with error code %d\n", ret);
                goto out;
        }
-       ret = ccw_device_set_online(cgdev->cdev[READ]);
+       ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
        if (ret != 0) {
                dev_warn(&cgdev->dev,
                        "Setting the read subchannel online"
                        " failed with error code %d\n", ret);
                goto out;
        }
-       ret = ccw_device_set_online(cgdev->cdev[WRITE]);
+       ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
        if (ret != 0) {
                dev_warn(&cgdev->dev,
                        "Setting the write subchannel online "
@@ -3002,8 +3002,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
        }
        dev->ml_priv = privptr;
        dev_set_drvdata(&cgdev->dev, privptr);
-       dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
-       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
        /* sysfs magic */
         SET_NETDEV_DEV(dev, &cgdev->dev);
        if (register_netdev(dev) != 0) {
@@ -3021,16 +3021,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
                        goto out;
                }
        }
-       privptr->channel[READ].ndev = dev;
-       privptr->channel[WRITE].ndev = dev;
+       privptr->channel[READ_CHANNEL].ndev = dev;
+       privptr->channel[WRITE_CHANNEL].ndev = dev;
        privptr->p_env->ndev = dev;
 
        dev_info(&cgdev->dev, "%s:readsize=%d  writesize=%d "
                "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
                 dev->name, p_env->read_size,
                p_env->write_size, p_env->read_buffers,
-                p_env->write_buffers, p_env->devno[READ],
-               p_env->devno[WRITE]);
+               p_env->write_buffers, p_env->devno[READ_CHANNEL],
+               p_env->devno[WRITE_CHANNEL]);
        dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
                ":%.8s api_type: %.8s\n",
                 dev->name, p_env->host_name,
@@ -3072,10 +3072,10 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
        priv = dev_get_drvdata(&cgdev->dev);
        if (!priv)
                return -ENODEV;
-       ndev = priv->channel[READ].ndev;
+       ndev = priv->channel[READ_CHANNEL].ndev;
        if (ndev) {
                /* Close the device */
-               dev_info(&cgdev->dev, "%s: shutting down \n",
+               dev_info(&cgdev->dev, "%s: shutting down\n",
                        ndev->name);
                if (ndev->flags & IFF_RUNNING)
                        ret = claw_release(ndev);
@@ -3083,8 +3083,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
                unregister_netdev(ndev);
                ndev->ml_priv = NULL;  /* cgdev data, not ndev's to free */
                claw_free_netdevice(ndev, 1);
-               priv->channel[READ].ndev = NULL;
-               priv->channel[WRITE].ndev = NULL;
+               priv->channel[READ_CHANNEL].ndev = NULL;
+               priv->channel[WRITE_CHANNEL].ndev = NULL;
                priv->p_env->ndev = NULL;
        }
        ccw_device_set_offline(cgdev->cdev[1]);
@@ -3115,8 +3115,8 @@ claw_remove_device(struct ccwgroup_device *cgdev)
        priv->channel[1].irb=NULL;
        kfree(priv);
        dev_set_drvdata(&cgdev->dev, NULL);
-       dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
-       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
+       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
        put_device(&cgdev->dev);
 
        return;
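
A note on the claw_hw_tx() hunks above: every fallback branch does the same
two things, namely take an extra reference on the skb and park it on the
write channel's collect queue so a later pass can transmit it. A minimal
userspace sketch of that refcount-then-enqueue shape (the struct layouts
and the fixed-size ring are illustrative, not the driver's real types):

    #include <stdio.h>

    struct skb {
            int users;                      /* reference count, as in sk_buff */
    };

    struct queue {
            struct skb *slot[8];
            int tail;
    };

    /* the repeated fallback: keep the buffer alive, defer it to the
     * write channel's collect queue */
    static void collect(struct queue *q, struct skb *skb)
    {
            skb->users++;                   /* atomic_inc(&skb->users) */
            q->slot[q->tail++] = skb;       /* skb_queue_tail(...) */
    }

    int main(void)
    {
            struct skb skb = { .users = 1 };
            struct queue collect_queue = { .tail = 0 };

            collect(&collect_queue, &skb);
            printf("users=%d queued=%d\n", skb.users, collect_queue.tail);
            return 0;
    }
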
index 46d59a13db12d1aabdaf87d497dbe2f3d1c02d55..1bc5904df19ff550d3094ed939faab28f5cac44c 100644 (file)
@@ -74,8 +74,8 @@
 #define MAX_ENVELOPE_SIZE       65536
 #define CLAW_DEFAULT_MTU_SIZE   4096
 #define DEF_PACK_BUFSIZE       32768
-#define READ                    0
-#define WRITE                   1
+#define READ_CHANNEL           0
+#define WRITE_CHANNEL          1
 
 #define TB_TX                   0          /* sk buffer handling in process  */
 #define TB_STOP                 1          /* network device stop in process */
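
The rename above exists because READ and WRITE are also defined kernel-wide
(via <linux/fs.h>), so a driver-local redefinition draws redefinition
warnings and, worse, ties the channel array indexing to values the driver
does not own. A small stand-alone illustration, with the global defines
mocked up in userspace:

    #include <stdio.h>

    #define READ  0                 /* stand-ins for the kernel-wide  */
    #define WRITE 1                 /* defines from <linux/fs.h>      */

    /* a driver that reused READ/WRITE as array indices would silently
     * break if the global values ever changed; a private namespace,
     * as introduced above, cannot be broken from the outside */
    #define READ_CHANNEL  0
    #define WRITE_CHANNEL 1

    int main(void)
    {
            int devno[2] = { 0x1000, 0x1001 }; /* hypothetical subchannels */

            printf("read  subchannel 0x%04x\n", devno[READ_CHANNEL]);
            printf("write subchannel 0x%04x\n", devno[WRITE_CHANNEL]);
            return 0;
    }
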
index 70eb7f1384146e4138e5853534a87966cf79fb5a..8c921fc3511a5e741b187e9f32e763f004b11427 100644 (file)
@@ -454,7 +454,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
        if ((fsmstate == CTC_STATE_SETUPWAIT) &&
            (ch->protocol == CTCM_PROTO_OS390)) {
                /* OS/390 resp. z/OS */
-               if (CHANNEL_DIRECTION(ch->flags) == READ) {
+               if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                        *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
                        fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
                                     CTC_EVENT_TIMER, ch);
@@ -472,14 +472,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
         * if in compatibility mode, since VM TCP delays the initial
         * frame until it has some data to send.
         */
-       if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+       if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
            (ch->protocol != CTCM_PROTO_S390))
                fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 
        *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
        ch->ccw[1].count = 2;   /* Transfer only length */
 
-       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
        rc = ccw_device_start(ch->cdev, &ch->ccw[0],
                                        (unsigned long)ch, 0xff, 0);
@@ -495,7 +495,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
         * reply from VM TCP which brings up the RX channel to it's
         * final state.
         */
-       if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+       if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
            (ch->protocol == CTCM_PROTO_S390)) {
                struct net_device *dev = ch->netdev;
                struct ctcm_priv *priv = dev->ml_priv;
@@ -600,15 +600,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
        int rc;
 
        CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
-                       CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+               CTCM_FUNTAIL, ch->id,
+               (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
 
        if (ch->trans_skb != NULL) {
                clear_normalized_cda(&ch->ccw[1]);
                dev_kfree_skb(ch->trans_skb);
                ch->trans_skb = NULL;
        }
-       if (CHANNEL_DIRECTION(ch->flags) == READ) {
+       if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                ch->ccw[1].cmd_code = CCW_CMD_READ;
                ch->ccw[1].flags = CCW_FLAG_SLI;
                ch->ccw[1].count = 0;
@@ -622,7 +622,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
                        "%s(%s): %s trans_skb alloc delayed "
                        "until first transfer",
                        CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+                       (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+                               "RX" : "TX");
        }
        ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
        ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
@@ -720,7 +721,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
 
        ch->th_seg = 0x00;
        ch->th_seq_num = 0x00;
-       if (CHANNEL_DIRECTION(ch->flags) == READ) {
+       if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                skb_queue_purge(&ch->io_queue);
                fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
        } else {
@@ -799,7 +800,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
                fsm_newstate(fi, CTC_STATE_STARTRETRY);
                fsm_deltimer(&ch->timer);
                fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
-               if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
+               if (!IS_MPC(ch) &&
+                   (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
                        int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
                        if (rc != 0)
                                ctcm_ccw_check_rc(ch, rc,
@@ -811,10 +813,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
        CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
                "%s(%s) : %s error during %s channel setup state=%s\n",
                CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
-               (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
+               (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
                fsm_getstate_str(fi));
 
-       if (CHANNEL_DIRECTION(ch->flags) == READ) {
+       if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                fsm_newstate(fi, CTC_STATE_RXERR);
                fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
        } else {
@@ -945,7 +947,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
        fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
 
        fsm_newstate(fi, CTC_STATE_DTERM);
-       ch2 = priv->channel[WRITE];
+       ch2 = priv->channel[CTCM_WRITE];
        fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
 
        ccw_device_halt(ch->cdev, (unsigned long)ch);
@@ -1074,13 +1076,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
        fsm_deltimer(&ch->timer);
        CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                "%s: %s: %s unrecoverable channel error",
-                       CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");
+                       CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
 
        if (IS_MPC(ch)) {
                priv->stats.tx_dropped++;
                priv->stats.tx_errors++;
        }
-       if (rd == READ) {
+       if (rd == CTCM_READ) {
                fsm_newstate(fi, CTC_STATE_RXERR);
                fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
        } else {
@@ -1503,7 +1505,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
        switch (fsm_getstate(fi)) {
        case CTC_STATE_STARTRETRY:
        case CTC_STATE_SETUPWAIT:
-               if (CHANNEL_DIRECTION(ch->flags) == READ) {
+               if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                        ctcmpc_chx_rxidle(fi, event, arg);
                } else {
                        fsm_newstate(fi, CTC_STATE_TXIDLE);
@@ -1514,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
                break;
        };
 
-       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
 
 done:
@@ -1753,8 +1755,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
        struct net_device *dev = ach->netdev;
        struct ctcm_priv *priv = dev->ml_priv;
        struct mpc_group *grp = priv->mpcg;
-       struct channel *wch = priv->channel[WRITE];
-       struct channel *rch = priv->channel[READ];
+       struct channel *wch = priv->channel[CTCM_WRITE];
+       struct channel *rch = priv->channel[CTCM_READ];
        struct sk_buff *skb;
        struct th_sweep *header;
        int rc = 0;
@@ -2070,7 +2072,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
        fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
        if (IS_MPC(priv))
                priv->mpcg->channels_terminating = 0;
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                struct channel *ch = priv->channel[direction];
                fsm_event(ch->fsm, CTC_EVENT_START, ch);
        }
@@ -2092,7 +2094,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
        CTCMY_DBF_DEV_NAME(SETUP, dev, "");
 
        fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                struct channel *ch = priv->channel[direction];
                fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
                ch->th_seq_num = 0x00;
@@ -2183,11 +2185,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
 
        if (IS_MPC(priv)) {
                if (event == DEV_EVENT_RXUP)
-                       mpc_channel_action(priv->channel[READ],
-                               READ, MPC_CHANNEL_ADD);
+                       mpc_channel_action(priv->channel[CTCM_READ],
+                               CTCM_READ, MPC_CHANNEL_ADD);
                else
-                       mpc_channel_action(priv->channel[WRITE],
-                               WRITE, MPC_CHANNEL_ADD);
+                       mpc_channel_action(priv->channel[CTCM_WRITE],
+                               CTCM_WRITE, MPC_CHANNEL_ADD);
        }
 }
 
@@ -2239,11 +2241,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
        }
        if (IS_MPC(priv)) {
                if (event == DEV_EVENT_RXDOWN)
-                       mpc_channel_action(priv->channel[READ],
-                               READ, MPC_CHANNEL_REMOVE);
+                       mpc_channel_action(priv->channel[CTCM_READ],
+                               CTCM_READ, MPC_CHANNEL_REMOVE);
                else
-                       mpc_channel_action(priv->channel[WRITE],
-                               WRITE, MPC_CHANNEL_REMOVE);
+                       mpc_channel_action(priv->channel[CTCM_WRITE],
+                               CTCM_WRITE, MPC_CHANNEL_REMOVE);
        }
 }
 
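
For context on the CHANNEL_DIRECTION() comparisons used throughout the
ctcm_fsms.c hunks above: the direction is carried as a bit inside ch->flags
and masked out for comparison against CTCM_READ/CTCM_WRITE. A rough
userspace reconstruction, assuming flag values that mirror the
channel_get() hunk in ctcm_main.c below (the authoritative definitions live
in ctcm_main.h):

    #include <stdio.h>

    #define CTCM_READ               0
    #define CTCM_WRITE              1

    /* assumed encodings: one bit selects the direction */
    #define CHANNEL_FLAGS_READ      0
    #define CHANNEL_FLAGS_WRITE     1
    #define CHANNEL_FLAGS_INUSE     2
    #define CHANNEL_FLAGS_RWMASK    1
    #define CHANNEL_DIRECTION(f)    ((f) & CHANNEL_FLAGS_RWMASK)

    int main(void)
    {
            unsigned int flags = CHANNEL_FLAGS_INUSE | CHANNEL_FLAGS_WRITE;

            /* prints "TX": the masked bit marks the write channel */
            printf("%s\n",
                   CHANNEL_DIRECTION(flags) == CTCM_READ ? "RX" : "TX");
            return 0;
    }
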
index 4ecafbf91211b72775315a95e20a322e0e11e834..6edf20b62de5bae28214275931f0db9fd1fcd1f2 100644 (file)
@@ -267,7 +267,7 @@ static struct channel *channel_get(enum ctcm_channel_types type,
                else {
                        ch->flags |= CHANNEL_FLAGS_INUSE;
                        ch->flags &= ~CHANNEL_FLAGS_RWMASK;
-                       ch->flags |= (direction == WRITE)
+                       ch->flags |= (direction == CTCM_WRITE)
                            ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
                        fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
                }
@@ -388,7 +388,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                        "%s(%s): %s trans_skb allocation error",
                        CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+                       (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+                               "RX" : "TX");
                return -ENOMEM;
        }
 
@@ -399,7 +400,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                        "%s(%s): %s set norm_cda failed",
                        CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+                       (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+                               "RX" : "TX");
                return -ENOMEM;
        }
 
@@ -603,14 +605,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
 
        priv = dev->ml_priv;
        grp = priv->mpcg;
-       ch = priv->channel[WRITE];
+       ch = priv->channel[CTCM_WRITE];
 
        /* sweep processing is not complete until response and request */
        /* has completed for all read channels in group                */
        if (grp->in_sweep == 0) {
                grp->in_sweep = 1;
-               grp->sweep_rsp_pend_num = grp->active_channels[READ];
-               grp->sweep_req_pend_num = grp->active_channels[READ];
+               grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+               grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
        }
 
        sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@@ -911,7 +913,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
 
        dev->trans_start = jiffies;
-       if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
+       if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
                return NETDEV_TX_BUSY;
        return NETDEV_TX_OK;
 }
@@ -994,7 +996,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
        }
 
        dev->trans_start = jiffies;
-       if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
+       if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
                        "%s(%s): device error - dropped",
                                        CTCM_FUNTAIL, dev->name);
@@ -1035,7 +1037,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
                return -EINVAL;
 
        priv = dev->ml_priv;
-       max_bufsize = priv->channel[READ]->max_bufsize;
+       max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
 
        if (IS_MPC(priv)) {
                if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
@@ -1226,10 +1228,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
        priv = dev_get_drvdata(&cgdev->dev);
 
        /* Try to extract channel from driver data. */
-       if (priv->channel[READ]->cdev == cdev)
-               ch = priv->channel[READ];
-       else if (priv->channel[WRITE]->cdev == cdev)
-               ch = priv->channel[WRITE];
+       if (priv->channel[CTCM_READ]->cdev == cdev)
+               ch = priv->channel[CTCM_READ];
+       else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+               ch = priv->channel[CTCM_WRITE];
        else {
                dev_err(&cdev->dev,
                        "%s: Internal error: Can't determine channel for "
@@ -1587,13 +1589,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
                goto out_ccw2;
        }
 
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                priv->channel[direction] =
-                   channel_get(type, direction == READ ? read_id : write_id,
-                               direction);
+                       channel_get(type, direction == CTCM_READ ?
+                               read_id : write_id, direction);
                if (priv->channel[direction] == NULL) {
-                       if (direction == WRITE)
-                               channel_free(priv->channel[READ]);
+                       if (direction == CTCM_WRITE)
+                               channel_free(priv->channel[CTCM_READ]);
                        goto out_dev;
                }
                priv->channel[direction]->netdev = dev;
@@ -1617,13 +1619,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 
        dev_info(&dev->dev,
                "setup OK : r/w = %s/%s, protocol : %d\n",
-                       priv->channel[READ]->id,
-                       priv->channel[WRITE]->id, priv->protocol);
+                       priv->channel[CTCM_READ]->id,
+                       priv->channel[CTCM_WRITE]->id, priv->protocol);
 
        CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
                "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
-                       priv->channel[READ]->id,
-                       priv->channel[WRITE]->id, priv->protocol);
+                       priv->channel[CTCM_READ]->id,
+                       priv->channel[CTCM_WRITE]->id, priv->protocol);
 
        return 0;
 out_unregister:
@@ -1635,10 +1637,10 @@ out_ccw2:
 out_ccw1:
        ccw_device_set_offline(cgdev->cdev[0]);
 out_remove_channel2:
-       readc = channel_get(type, read_id, READ);
+       readc = channel_get(type, read_id, CTCM_READ);
        channel_remove(readc);
 out_remove_channel1:
-       writec = channel_get(type, write_id, WRITE);
+       writec = channel_get(type, write_id, CTCM_WRITE);
        channel_remove(writec);
 out_err_result:
        return result;
@@ -1660,19 +1662,19 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
        if (!priv)
                return -ENODEV;
 
-       if (priv->channel[READ]) {
-               dev = priv->channel[READ]->netdev;
+       if (priv->channel[CTCM_READ]) {
+               dev = priv->channel[CTCM_READ]->netdev;
                CTCM_DBF_DEV(SETUP, dev, "");
                /* Close the device */
                ctcm_close(dev);
                dev->flags &= ~IFF_RUNNING;
                ctcm_remove_attributes(&cgdev->dev);
-               channel_free(priv->channel[READ]);
+               channel_free(priv->channel[CTCM_READ]);
        } else
                dev = NULL;
 
-       if (priv->channel[WRITE])
-               channel_free(priv->channel[WRITE]);
+       if (priv->channel[CTCM_WRITE])
+               channel_free(priv->channel[CTCM_WRITE]);
 
        if (dev) {
                unregister_netdev(dev);
@@ -1685,11 +1687,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
        ccw_device_set_offline(cgdev->cdev[1]);
        ccw_device_set_offline(cgdev->cdev[0]);
 
-       if (priv->channel[READ])
-               channel_remove(priv->channel[READ]);
-       if (priv->channel[WRITE])
-               channel_remove(priv->channel[WRITE]);
-       priv->channel[READ] = priv->channel[WRITE] = NULL;
+       if (priv->channel[CTCM_READ])
+               channel_remove(priv->channel[CTCM_READ]);
+       if (priv->channel[CTCM_WRITE])
+               channel_remove(priv->channel[CTCM_WRITE]);
+       priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
 
        return 0;
 
@@ -1720,11 +1722,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
 
        if (gdev->state == CCWGROUP_OFFLINE)
                return 0;
-       netif_device_detach(priv->channel[READ]->netdev);
-       ctcm_close(priv->channel[READ]->netdev);
+       netif_device_detach(priv->channel[CTCM_READ]->netdev);
+       ctcm_close(priv->channel[CTCM_READ]->netdev);
        if (!wait_event_timeout(priv->fsm->wait_q,
            fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
-               netif_device_attach(priv->channel[READ]->netdev);
+               netif_device_attach(priv->channel[CTCM_READ]->netdev);
                return -EBUSY;
        }
        ccw_device_set_offline(gdev->cdev[1]);
@@ -1745,9 +1747,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev)
        rc = ccw_device_set_online(gdev->cdev[0]);
        if (rc)
                goto err_out;
-       ctcm_open(priv->channel[READ]->netdev);
+       ctcm_open(priv->channel[CTCM_READ]->netdev);
 err_out:
-       netif_device_attach(priv->channel[READ]->netdev);
+       netif_device_attach(priv->channel[CTCM_READ]->netdev);
        return rc;
 }
 
index d34fa14f44e767c7ddf866b2d325406c48607ce2..24d5215eb0c40af4816115579bde366149eb159f 100644 (file)
@@ -111,8 +111,8 @@ enum ctcm_channel_types {
 
 #define CTCM_INITIAL_BLOCKLEN  2
 
-#define READ                   0
-#define WRITE                  1
+#define CTCM_READ              0
+#define CTCM_WRITE             1
 
 #define CTCM_ID_SIZE           20+3
 
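
One property the new names keep: CTCM_READ (0) and CTCM_WRITE (1) remain
consecutive integers, which is what lets the dev_action_start()/stop() and
mpc_action_doxid7() hunks iterate both channels with a single loop. A
trivial sketch:

    #include <stdio.h>

    #define CTCM_READ       0
    #define CTCM_WRITE      1

    int main(void)
    {
            int direction;

            /* visits channel[0] then channel[1], exactly as the
             * driver's "for (direction = CTCM_READ; ...)" loops do */
            for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++)
                    printf("channel[%d]\n", direction);
            return 0;
    }
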
index 87c24d2936d6e34d1de09da7ebe27aff148d8f61..2861e78773cb5f0e4ee319e4cc0928a00953c717 100644 (file)
@@ -419,8 +419,8 @@ void ctc_mpc_establish_connectivity(int port_num,
                return;
        priv = dev->ml_priv;
        grp = priv->mpcg;
-       rch = priv->channel[READ];
-       wch = priv->channel[WRITE];
+       rch = priv->channel[CTCM_READ];
+       wch = priv->channel[CTCM_WRITE];
 
        CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
                        "%s(%s): state=%s",
@@ -578,7 +578,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
                        "%s: %s: flowc = %d",
                                CTCM_FUNTAIL, dev->name, flowc);
 
-       rch = priv->channel[READ];
+       rch = priv->channel[CTCM_READ];
 
        mpcg_state = fsm_getstate(grp->fsm);
        switch (flowc) {
@@ -622,7 +622,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
        struct net_device *dev = rch->netdev;
        struct ctcm_priv   *priv = dev->ml_priv;
        struct mpc_group  *grp = priv->mpcg;
-       struct channel    *ch = priv->channel[WRITE];
+       struct channel    *ch = priv->channel[CTCM_WRITE];
 
        CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
        CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -656,7 +656,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
        int rc = 0;
        struct th_sweep *header;
        struct sk_buff *sweep_skb;
-       struct channel *ch  = priv->channel[WRITE];
+       struct channel *ch  = priv->channel[CTCM_WRITE];
 
        CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
 
@@ -712,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
        struct net_device *dev     = rch->netdev;
        struct ctcm_priv  *priv = dev->ml_priv;
        struct mpc_group  *grp  = priv->mpcg;
-       struct channel    *ch      = priv->channel[WRITE];
+       struct channel    *ch      = priv->channel[CTCM_WRITE];
 
        if (do_debug)
                CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@@ -721,8 +721,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
        if (grp->in_sweep == 0) {
                grp->in_sweep = 1;
                ctcm_test_and_set_busy(dev);
-               grp->sweep_req_pend_num = grp->active_channels[READ];
-               grp->sweep_rsp_pend_num = grp->active_channels[READ];
+               grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+               grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
        }
 
        CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -906,14 +906,14 @@ void mpc_group_ready(unsigned long adev)
        fsm_newstate(grp->fsm, MPCG_STATE_READY);
 
        /* Put up a read on the channel */
-       ch = priv->channel[READ];
+       ch = priv->channel[CTCM_READ];
        ch->pdu_seq = 0;
        CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
                        __func__, ch->pdu_seq);
 
        ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
        /* Put the write channel in idle state */
-       ch = priv->channel[WRITE];
+       ch = priv->channel[CTCM_WRITE];
        if (ch->collect_len > 0) {
                spin_lock(&ch->collect_lock);
                ctcm_purge_skb_queue(&ch->collect_queue);
@@ -960,7 +960,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
                "%s: %i / Grp:%s total_channels=%i, active_channels: "
                "read=%i, write=%i\n", __func__, action,
                fsm_getstate_str(grp->fsm), grp->num_channel_paths,
-               grp->active_channels[READ], grp->active_channels[WRITE]);
+               grp->active_channels[CTCM_READ],
+               grp->active_channels[CTCM_WRITE]);
 
        if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
                grp->num_channel_paths++;
@@ -994,10 +995,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
                                grp->xid_skb->data,
                                grp->xid_skb->len);
 
-               ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ)
+               ch->xid->xid2_dlc_type =
+                       ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                                ? XID2_READ_SIDE : XID2_WRITE_SIDE);
 
-               if (CHANNEL_DIRECTION(ch->flags) == WRITE)
+               if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
                        ch->xid->xid2_buf_len = 0x00;
 
                ch->xid_skb->data = ch->xid_skb_data;
@@ -1006,8 +1008,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
 
                fsm_newstate(ch->fsm, CH_XID0_PENDING);
 
-               if ((grp->active_channels[READ]  > 0) &&
-                   (grp->active_channels[WRITE] > 0) &&
+               if ((grp->active_channels[CTCM_READ] > 0) &&
+                   (grp->active_channels[CTCM_WRITE] > 0) &&
                        (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
                        fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
                        CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
@@ -1027,10 +1029,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
                if (grp->channels_terminating)
                                        goto done;
 
-               if (((grp->active_channels[READ] == 0) &&
-                                       (grp->active_channels[WRITE] > 0))
-                       || ((grp->active_channels[WRITE] == 0) &&
-                                       (grp->active_channels[READ] > 0)))
+               if (((grp->active_channels[CTCM_READ] == 0) &&
+                                       (grp->active_channels[CTCM_WRITE] > 0))
+                       || ((grp->active_channels[CTCM_WRITE] == 0) &&
+                                       (grp->active_channels[CTCM_READ] > 0)))
                        fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
        }
 done:
@@ -1038,7 +1040,8 @@ done:
                "exit %s: %i / Grp:%s total_channels=%i, active_channels: "
                "read=%i, write=%i\n", __func__, action,
                fsm_getstate_str(grp->fsm), grp->num_channel_paths,
-               grp->active_channels[READ], grp->active_channels[WRITE]);
+               grp->active_channels[CTCM_READ],
+               grp->active_channels[CTCM_WRITE]);
 
        CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
 }
@@ -1392,8 +1395,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
                (grp->port_persist == 0))
                fsm_deltimer(&priv->restart_timer);
 
-       wch = priv->channel[WRITE];
-       rch = priv->channel[READ];
+       wch = priv->channel[CTCM_WRITE];
+       rch = priv->channel[CTCM_READ];
 
        switch (grp->saved_state) {
        case MPCG_STATE_RESET:
@@ -1480,8 +1483,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
 
        priv = dev->ml_priv;
        grp = priv->mpcg;
-       wch = priv->channel[WRITE];
-       rch = priv->channel[READ];
+       wch = priv->channel[CTCM_WRITE];
+       rch = priv->channel[CTCM_READ];
 
        switch (fsm_getstate(grp->fsm)) {
        case MPCG_STATE_XID2INITW:
@@ -1586,7 +1589,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
        CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
 
        /*the received direction should be the opposite of ours  */
-       if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE :
+       if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
                                XID2_READ_SIDE) != xid->xid2_dlc_type) {
                rc = 2;
                /* XID REJECTED: r/w channel pairing mismatch */
@@ -1912,7 +1915,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
        if (grp == NULL)
                return;
 
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                struct channel *ch = priv->channel[direction];
                struct xid2 *thisxid = ch->xid;
                ch->xid_skb->data = ch->xid_skb_data;
@@ -2152,14 +2155,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
                        return -ENOMEM;
                }
 
-               *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq;
-               priv->channel[READ]->pdu_seq++;
+               *((__u32 *)skb_push(skb, 4)) =
+                       priv->channel[CTCM_READ]->pdu_seq;
+               priv->channel[CTCM_READ]->pdu_seq++;
                CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
-                               __func__, priv->channel[READ]->pdu_seq);
+                               __func__, priv->channel[CTCM_READ]->pdu_seq);
 
                /* receipt of CC03 resets anticipated sequence number on
                      receiving side */
-               priv->channel[READ]->pdu_seq = 0x00;
+               priv->channel[CTCM_READ]->pdu_seq = 0x00;
                skb_reset_mac_header(skb);
                skb->dev = dev;
                skb->protocol = htons(ETH_P_SNAP);
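
In mpc_send_qllc_discontact() above, the read channel's pdu_seq is
prepended to the packet with skb_push(), which grows the buffer at the
front inside pre-reserved headroom. The same prepend-in-headroom idea in
plain C (the buffer layout and sequence value are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* userspace stand-in for skb_push(): extend the buffer at the front */
    static void *buf_push(uint8_t **data, size_t len)
    {
            *data -= len;
            return *data;
    }

    int main(void)
    {
            uint32_t storage[16] = { 0 };   /* keeps 4-byte alignment */
            uint8_t *data = (uint8_t *)storage + 8; /* 8 bytes headroom */
            uint32_t pdu_seq = 0x00000007;  /* hypothetical sequence no. */

            memcpy(data, "payload", 8);
            *(uint32_t *)buf_push(&data, 4) = pdu_seq;
            printf("prepended seq = %08x\n", (unsigned)*(uint32_t *)data);
            return 0;
    }
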
index 2b24550e865e672e9ef6761b667beba2231b235e..8305319b2a846c31328f5b773e8e0177cf6d41d0 100644 (file)
@@ -38,8 +38,8 @@ static ssize_t ctcm_buffer_write(struct device *dev,
        int bs1;
        struct ctcm_priv *priv = dev_get_drvdata(dev);
 
-       if (!(priv && priv->channel[READ] &&
-                       (ndev = priv->channel[READ]->netdev))) {
+       if (!(priv && priv->channel[CTCM_READ] &&
+                       (ndev = priv->channel[CTCM_READ]->netdev))) {
                CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
                return -ENODEV;
        }
@@ -55,12 +55,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
            (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
                                        goto einval;
 
-       priv->channel[READ]->max_bufsize = bs1;
-       priv->channel[WRITE]->max_bufsize = bs1;
+       priv->channel[CTCM_READ]->max_bufsize = bs1;
+       priv->channel[CTCM_WRITE]->max_bufsize = bs1;
        if (!(ndev->flags & IFF_RUNNING))
                ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
-       priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
-       priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+       priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+       priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
 
        CTCM_DBF_DEV(SETUP, ndev, buf);
        return count;
@@ -85,9 +85,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
        p += sprintf(p, "  Device FSM state: %s\n",
                     fsm_getstate_str(priv->fsm));
        p += sprintf(p, "  RX channel FSM state: %s\n",
-                    fsm_getstate_str(priv->channel[READ]->fsm));
+                    fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
        p += sprintf(p, "  TX channel FSM state: %s\n",
-                    fsm_getstate_str(priv->channel[WRITE]->fsm));
+                    fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
        p += sprintf(p, "  Max. TX buffer used: %ld\n",
-                    priv->channel[WRITE]->prof.maxmulti);
+                    priv->channel[CTCM_WRITE]->prof.maxmulti);
        p += sprintf(p, "  Max. chained SKBs: %ld\n",
@@ -102,7 +102,7 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
-                    priv->channel[WRITE]->prof.tx_time);
+                    priv->channel[CTCM_WRITE]->prof.tx_time);
 
        printk(KERN_INFO "Statistics for %s:\n%s",
-                               priv->channel[WRITE]->netdev->name, sbuf);
+                               priv->channel[CTCM_WRITE]->netdev->name, sbuf);
        kfree(sbuf);
        return;
 }
@@ -125,7 +125,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
                return -ENODEV;
        /* Reset statistics */
-       memset(&priv->channel[WRITE]->prof, 0,
-                               sizeof(priv->channel[WRITE]->prof));
+       memset(&priv->channel[CTCM_WRITE]->prof, 0,
+                               sizeof(priv->channel[CTCM_WRITE]->prof));
        return count;
 }
 
index ffea35c638799a4dcb7888e4c530afe1347190ab..0d5eeadf6121a23259c750490f46ef9c38d1d245 100644 (file)
@@ -31,21 +31,20 @@ static struct afs_cell *afs_cell_root;
  * allocate a cell record and fill in its name, VL server address list and
  * allocate an anonymous key
  */
-static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
+static struct afs_cell *afs_cell_alloc(const char *name, unsigned namelen,
+                                      char *vllist)
 {
        struct afs_cell *cell;
        struct key *key;
-       size_t namelen;
        char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next;
        char  *dvllist = NULL, *_vllist = NULL;
        char  delimiter = ':';
        int ret;
 
-       _enter("%s,%s", name, vllist);
+       _enter("%*.*s,%s", namelen, namelen, name ?: "", vllist);
 
        BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */
 
-       namelen = strlen(name);
        if (namelen > AFS_MAXCELLNAME) {
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
@@ -73,6 +72,10 @@ static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
        if (!vllist || strlen(vllist) < 7) {
                ret = dns_query("afsdb", name, namelen, "ipv4", &dvllist, NULL);
                if (ret < 0) {
+                       if (ret == -ENODATA || ret == -EAGAIN || ret == -ENOKEY)
+                               /* translate these errors into something
+                                * userspace might understand */
+                               ret = -EDESTADDRREQ;
                        _leave(" = %d", ret);
                        return ERR_PTR(ret);
                }
@@ -138,26 +141,29 @@ error:
 }
 
 /*
- * create a cell record
- * - "name" is the name of the cell
- * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ * afs_cell_create() - create a cell record
+ * @name:      the name of the cell.
+ * @namesz:    the strlen of the cell name.
+ * @vllist:    a colon separated list of IP addresses in "a.b.c.d" format.
+ * @retref:    T to return the cell reference when the cell already exists.
  */
-struct afs_cell *afs_cell_create(const char *name, char *vllist)
+struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
+                                char *vllist, bool retref)
 {
        struct afs_cell *cell;
        int ret;
 
-       _enter("%s,%s", name, vllist);
+       _enter("%*.*s,%s", namesz, namesz, name ?: "", vllist);
 
        down_write(&afs_cells_sem);
        read_lock(&afs_cells_lock);
        list_for_each_entry(cell, &afs_cells, link) {
-               if (strcasecmp(cell->name, name) == 0)
+               if (strncasecmp(cell->name, name, namesz) == 0 && !cell->name[namesz])
                        goto duplicate_name;
        }
        read_unlock(&afs_cells_lock);
 
-       cell = afs_cell_alloc(name, vllist);
+       cell = afs_cell_alloc(name, namesz, vllist);
        if (IS_ERR(cell)) {
                _leave(" = %ld", PTR_ERR(cell));
                up_write(&afs_cells_sem);
@@ -197,8 +203,18 @@ error:
        return ERR_PTR(ret);
 
 duplicate_name:
+       if (retref && !IS_ERR(cell))
+               afs_get_cell(cell);
+
        read_unlock(&afs_cells_lock);
        up_write(&afs_cells_sem);
+
+       if (retref) {
+               _leave(" = %p", cell);
+               return cell;
+       }
+
+       _leave(" = -EEXIST");
        return ERR_PTR(-EEXIST);
 }
 
@@ -229,7 +245,7 @@ int afs_cell_init(char *rootcell)
                *cp++ = 0;
 
        /* allocate a cell record for the root cell */
-       new_root = afs_cell_create(rootcell, cp);
+       new_root = afs_cell_create(rootcell, strlen(rootcell), cp, false);
        if (IS_ERR(new_root)) {
                _leave(" = %ld", PTR_ERR(new_root));
                return PTR_ERR(new_root);
@@ -249,11 +265,12 @@ int afs_cell_init(char *rootcell)
 /*
  * lookup a cell record
  */
-struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
+struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
+                                bool dns_cell)
 {
        struct afs_cell *cell;
 
-       _enter("\"%*.*s\",", namesz, namesz, name ? name : "");
+       _enter("\"%*.*s\",", namesz, namesz, name ?: "");
 
        down_read(&afs_cells_sem);
        read_lock(&afs_cells_lock);
@@ -267,6 +284,8 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
                        }
                }
                cell = ERR_PTR(-ENOENT);
+               if (dns_cell)
+                       goto create_cell;
        found:
                ;
        } else {
@@ -289,6 +308,15 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
        up_read(&afs_cells_sem);
        _leave(" = %p", cell);
        return cell;
+
+create_cell:
+       read_unlock(&afs_cells_lock);
+       up_read(&afs_cells_sem);
+
+       cell = afs_cell_create(name, namesz, NULL, true);
+
+       _leave(" = %p", cell);
+       return cell;
 }
 
 #if 0
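
These cell.c changes lean on the kernel's pointer-encoded error convention:
ERR_PTR() folds a negative errno into a pointer return value, IS_ERR()
detects such a pointer, and PTR_ERR() recovers the errno. A self-contained
userspace approximation of the <linux/err.h> helpers (the lookup stub is
invented for the demonstration):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            /* errno values live in the top page of the address space */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *cell_lookup(int found)
    {
            static int cell = 42;
            return found ? (void *)&cell : ERR_PTR(-ENOENT);
    }

    int main(void)
    {
            void *cell = cell_lookup(0);

            if (IS_ERR(cell))
                    printf("lookup failed: %ld\n", PTR_ERR(cell)); /* -2 */
            return 0;
    }
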
index b42d5cc1d6d21aace96eaae96fe3aa521822c7c9..0d38c09bd55e10f4eaf5f0e25ef11fa3d9d82919 100644 (file)
@@ -476,6 +476,40 @@ static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
        return 0;
 }
 
+/*
+ * Try to auto-mount the mountpoint as a pseudo directory if the autocell
+ * option is set.
+ */
+static struct inode *afs_try_auto_mntpt(
+       int ret, struct dentry *dentry, struct inode *dir, struct key *key,
+       struct afs_fid *fid)
+{
+       const char *devname = dentry->d_name.name;
+       struct afs_vnode *vnode = AFS_FS_I(dir);
+       struct inode *inode;
+
+       _enter("%d, %p{%s}, {%x:%u}, %p",
+              ret, dentry, devname, vnode->fid.vid, vnode->fid.vnode, key);
+
+       if (ret != -ENOENT ||
+           !test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
+               goto out;
+
+       inode = afs_iget_autocell(dir, devname, strlen(devname), key);
+       if (IS_ERR(inode)) {
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       *fid = AFS_FS_I(inode)->fid;
+       _leave("= %p", inode);
+       return inode;
+
+out:
+       _leave("= %d", ret);
+       return ERR_PTR(ret);
+}
+
 /*
  * look up an entry in a directory
  */
@@ -520,6 +554,13 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 
        ret = afs_do_lookup(dir, dentry, &fid, key);
        if (ret < 0) {
+               inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid);
+               if (!IS_ERR(inode)) {
+                       key_put(key);
+                       goto success;
+               }
+
+               ret = PTR_ERR(inode);
                key_put(key);
                if (ret == -ENOENT) {
                        d_add(dentry, NULL);
@@ -539,6 +580,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
                return ERR_CAST(inode);
        }
 
+success:
        dentry->d_op = &afs_fs_dentry_operations;
 
        d_add(dentry, inode);
@@ -696,8 +738,9 @@ static int afs_d_delete(struct dentry *dentry)
                goto zap;
 
        if (dentry->d_inode &&
-           test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags))
-                       goto zap;
+           (test_bit(AFS_VNODE_DELETED,   &AFS_FS_I(dentry->d_inode)->flags) ||
+            test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(dentry->d_inode)->flags)))
+               goto zap;
 
        _leave(" = 0 [keep]");
        return 0;
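
The afs_lookup() hunk above adds a rescue-on-failure shape: a failed lookup
gets one chance to be satisfied by an auto-created mountpoint, and only if
that also fails is the error surfaced. A compressed, runnable outline of
that control flow, reusing the ERR_PTR helpers sketched earlier (every name
here is a stand-in, not the kernel API):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long e) { return (void *)e; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static int autocell = 1;        /* stands in for AFS_VNODE_AUTOCELL */

    static void *try_auto_mntpt(int ret)
    {
            /* only -ENOENT on an autocell directory may be rescued */
            if (ret != -ENOENT || !autocell)
                    return ERR_PTR(ret);
            return "pseudo dir inode"; /* stub for afs_iget_autocell() */
    }

    static long lookup(int ret)
    {
            if (ret < 0) {
                    void *inode = try_auto_mntpt(ret);
                    if (!IS_ERR(inode))
                            return 0;       /* rescued */
                    return PTR_ERR(inode);  /* original error preserved */
            }
            return 0;
    }

    int main(void)
    {
            printf("%ld\n", lookup(-ENOENT)); /* 0: auto mountpoint */
            printf("%ld\n", lookup(-EACCES)); /* -13: passed through */
            return 0;
    }
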
index 320ffef115746362defa94edef14b3a083df64ae..0747339011c31261a1cd1e473db702395773e69b 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 #include <linux/sched.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
 #include "internal.h"
 
 struct afs_iget_data {
@@ -101,6 +103,16 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
                inode->i_version == data->fid.unique;
 }
 
+/*
+ * iget5() comparator for inode created by autocell operations
+ *
+ * These pseudo inodes don't match anything.
+ */
+static int afs_iget5_autocell_test(struct inode *inode, void *opaque)
+{
+       return 0;
+}
+
 /*
  * iget5() inode initialiser
  */
@@ -117,6 +129,67 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
        return 0;
 }
 
+/*
+ * inode retrieval for autocell
+ */
+struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+                               int namesz, struct key *key)
+{
+       struct afs_iget_data data;
+       struct afs_super_info *as;
+       struct afs_vnode *vnode;
+       struct super_block *sb;
+       struct inode *inode;
+       static atomic_t afs_autocell_ino;
+
+       _enter("{%x:%u},%*.*s,",
+              AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
+              namesz, namesz, dev_name ?: "");
+
+       sb = dir->i_sb;
+       as = sb->s_fs_info;
+       data.volume = as->volume;
+       data.fid.vid = as->volume->vid;
+       data.fid.unique = 0;
+       data.fid.vnode = 0;
+
+       inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
+                            afs_iget5_autocell_test, afs_iget5_set,
+                            &data);
+       if (!inode) {
+               _leave(" = -ENOMEM");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
+              inode, inode->i_ino, data.fid.vid, data.fid.vnode,
+              data.fid.unique);
+
+       vnode = AFS_FS_I(inode);
+
+       /* there shouldn't be an existing inode */
+       BUG_ON(!(inode->i_state & I_NEW));
+
+       inode->i_size           = 0;
+       inode->i_mode           = S_IFDIR | S_IRUGO | S_IXUGO;
+       inode->i_op             = &afs_autocell_inode_operations;
+       inode->i_nlink          = 2;
+       inode->i_uid            = 0;
+       inode->i_gid            = 0;
+       inode->i_ctime.tv_sec   = get_seconds();
+       inode->i_ctime.tv_nsec  = 0;
+       inode->i_atime          = inode->i_mtime = inode->i_ctime;
+       inode->i_blocks         = 0;
+       inode->i_version        = 0;
+       inode->i_generation     = 0;
+
+       set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+       inode->i_flags |= S_NOATIME;
+       unlock_new_inode(inode);
+       _leave(" = %p", inode);
+       return inode;
+}
+
 /*
  * inode retrieval
  */
@@ -313,6 +386,19 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        return 0;
 }
 
+/*
+ * discard an AFS inode
+ */
+int afs_drop_inode(struct inode *inode)
+{
+       _enter("");
+
+       if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags))
+               return generic_delete_inode(inode);
+       else
+               return generic_drop_inode(inode);
+}
+
 /*
  * clear an AFS inode
  */
index c6c93f180707b41e47d0101ee7188f34fed68927..cca8eef736fcf2b6f9a9516e96c4fc06c8d228be 100644 (file)
@@ -42,6 +42,7 @@ typedef enum {
 struct afs_mount_params {
        bool                    rwpath;         /* T if the parent should be considered R/W */
        bool                    force;          /* T to force cell type */
+       bool                    autocell;       /* T if autocell (auto-mount) is enabled */
        afs_voltype_t           type;           /* type of volume requested */
        int                     volnamesz;      /* size of volume name */
        const char              *volname;       /* name of volume to mount */
@@ -358,6 +359,8 @@ struct afs_vnode {
 #define AFS_VNODE_READLOCKED   7               /* set if vnode is read-locked on the server */
 #define AFS_VNODE_WRITELOCKED  8               /* set if vnode is write-locked on the server */
 #define AFS_VNODE_UNLOCKING    9               /* set if vnode is being unlocked on the server */
+#define AFS_VNODE_AUTOCELL     10              /* set if vnode is an auto mount point */
+#define AFS_VNODE_PSEUDODIR    11              /* set if vnode is a pseudo directory */
 
        long                    acl_order;      /* ACL check count (callback break count) */
 
@@ -468,8 +471,8 @@ extern struct list_head afs_proc_cells;
 
 #define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
 extern int afs_cell_init(char *);
-extern struct afs_cell *afs_cell_create(const char *, char *);
-extern struct afs_cell *afs_cell_lookup(const char *, unsigned);
+extern struct afs_cell *afs_cell_create(const char *, unsigned, char *, bool);
+extern struct afs_cell *afs_cell_lookup(const char *, unsigned, bool);
 extern struct afs_cell *afs_grab_cell(struct afs_cell *);
 extern void afs_put_cell(struct afs_cell *);
 extern void afs_cell_purge(void);
@@ -558,6 +561,8 @@ extern int afs_fs_release_lock(struct afs_server *, struct key *,
 /*
  * inode.c
  */
+extern struct inode *afs_iget_autocell(struct inode *, const char *, int,
+                                      struct key *);
 extern struct inode *afs_iget(struct super_block *, struct key *,
                              struct afs_fid *, struct afs_file_status *,
                              struct afs_callback *);
@@ -566,6 +571,7 @@ extern int afs_validate(struct afs_vnode *, struct key *);
 extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int afs_setattr(struct dentry *, struct iattr *);
 extern void afs_evict_inode(struct inode *);
+extern int afs_drop_inode(struct inode *);
 
 /*
  * main.c
@@ -581,6 +587,7 @@ extern int afs_abort_to_error(u32);
  * mntpt.c
  */
 extern const struct inode_operations afs_mntpt_inode_operations;
+extern const struct inode_operations afs_autocell_inode_operations;
 extern const struct file_operations afs_mntpt_file_operations;
 
 extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
index a9e23039ea34604649156b66cd10dc664ece394a..6d552686c498fae427e2b9408bd03e96e1153699 100644 (file)
@@ -38,6 +38,11 @@ const struct inode_operations afs_mntpt_inode_operations = {
        .getattr        = afs_getattr,
 };
 
+const struct inode_operations afs_autocell_inode_operations = {
+       .follow_link    = afs_mntpt_follow_link,
+       .getattr        = afs_getattr,
+};
+
 static LIST_HEAD(afs_vfsmounts);
 static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
 
@@ -136,20 +141,16 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 {
        struct afs_super_info *super;
        struct vfsmount *mnt;
+       struct afs_vnode *vnode;
        struct page *page;
-       size_t size;
-       char *buf, *devname, *options;
+       char *devname, *options;
+       bool rwpath = false;
        int ret;
 
        _enter("{%s}", mntpt->d_name.name);
 
        BUG_ON(!mntpt->d_inode);
 
-       ret = -EINVAL;
-       size = mntpt->d_inode->i_size;
-       if (size > PAGE_SIZE - 1)
-               goto error_no_devname;
-
        ret = -ENOMEM;
        devname = (char *) get_zeroed_page(GFP_KERNEL);
        if (!devname)
@@ -159,28 +160,59 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
        if (!options)
                goto error_no_options;
 
-       /* read the contents of the AFS special symlink */
-       page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
-       if (IS_ERR(page)) {
-               ret = PTR_ERR(page);
-               goto error_no_page;
+       vnode = AFS_FS_I(mntpt->d_inode);
+       if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
+               /* if the directory is a pseudo directory, use the d_name */
+               static const char afs_root_cell[] = ":root.cell.";
+               unsigned size = mntpt->d_name.len;
+
+               ret = -ENOENT;
+               if (size < 2 || size > AFS_MAXCELLNAME)
+                       goto error_no_page;
+
+               if (mntpt->d_name.name[0] == '.') {
+                       devname[0] = '#';
+                       memcpy(devname + 1, mntpt->d_name.name + 1, size - 1);
+                       memcpy(devname + size, afs_root_cell,
+                              sizeof(afs_root_cell));
+                       rwpath = true;
+               } else {
+                       devname[0] = '%';
+                       memcpy(devname + 1, mntpt->d_name.name, size);
+                       memcpy(devname + size + 1, afs_root_cell,
+                              sizeof(afs_root_cell));
+               }
+       } else {
+               /* read the contents of the AFS special symlink */
+               loff_t size = i_size_read(mntpt->d_inode);
+               char *buf;
+
+               ret = -EINVAL;
+               if (size > PAGE_SIZE - 1)
+                       goto error_no_page;
+
+               page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
+               if (IS_ERR(page)) {
+                       ret = PTR_ERR(page);
+                       goto error_no_page;
+               }
+
+               ret = -EIO;
+               if (PageError(page))
+                       goto error;
+
+               buf = kmap_atomic(page, KM_USER0);
+               memcpy(devname, buf, size);
+               kunmap_atomic(buf, KM_USER0);
+               page_cache_release(page);
+               page = NULL;
        }
 
-       ret = -EIO;
-       if (PageError(page))
-               goto error;
-
-       buf = kmap_atomic(page, KM_USER0);
-       memcpy(devname, buf, size);
-       kunmap_atomic(buf, KM_USER0);
-       page_cache_release(page);
-       page = NULL;
-
        /* work out what options we want */
        super = AFS_FS_S(mntpt->d_sb);
        memcpy(options, "cell=", 5);
        strcpy(options + 5, super->volume->cell->name);
-       if (super->volume->type == AFSVL_RWVOL)
+       if (super->volume->type == AFSVL_RWVOL || rwpath)
                strcat(options, ",rwpath");
 
        /* try and do the mount */
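
A hedged walk-through of the pseudo-directory branch above; the cell name is invented for illustration.

/*
 * For d_name = "example.org" (size = 11) the '%' branch builds:
 *
 *   devname[0]      = '%'
 *   devname[1..11]  = "example.org"
 *   devname[12..]   = ":root.cell."   (afs_root_cell, NUL included)
 *
 * i.e. the device name "%example.org:root.cell.".  A ".example.org"
 * mountpoint takes the '#' branch instead and also sets rwpath = true.
 */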
index 852739d262a9f7eecff472d33845adb2a0a310f0..096b23f821a1081a2370df2de59276c9d08b807f 100644 (file)
@@ -294,7 +294,7 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
        if (strcmp(kbuf, "add") == 0) {
                struct afs_cell *cell;
 
-               cell = afs_cell_create(name, args);
+               cell = afs_cell_create(name, strlen(name), args, false);
                if (IS_ERR(cell)) {
                        ret = PTR_ERR(cell);
                        goto done;
index 9cf80f02da16a119747eb314ace489e50b1aaabf..77e1e5a61154c6796d80d709ed31722017d90e18 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
@@ -48,6 +49,7 @@ struct file_system_type afs_fs_type = {
 static const struct super_operations afs_super_ops = {
        .statfs         = afs_statfs,
        .alloc_inode    = afs_alloc_inode,
+       .drop_inode     = afs_drop_inode,
        .destroy_inode  = afs_destroy_inode,
        .evict_inode    = afs_evict_inode,
        .put_super      = afs_put_super,
@@ -62,12 +64,14 @@ enum {
        afs_opt_cell,
        afs_opt_rwpath,
        afs_opt_vol,
+       afs_opt_autocell,
 };
 
 static const match_table_t afs_options_list = {
        { afs_opt_cell,         "cell=%s"       },
        { afs_opt_rwpath,       "rwpath"        },
        { afs_opt_vol,          "vol=%s"        },
+       { afs_opt_autocell,     "autocell"      },
        { afs_no_opt,           NULL            },
 };
 
@@ -151,7 +155,8 @@ static int afs_parse_options(struct afs_mount_params *params,
                switch (token) {
                case afs_opt_cell:
                        cell = afs_cell_lookup(args[0].from,
-                                              args[0].to - args[0].from);
+                                              args[0].to - args[0].from,
+                                              false);
                        if (IS_ERR(cell))
                                return PTR_ERR(cell);
                        afs_put_cell(params->cell);
@@ -166,6 +171,10 @@ static int afs_parse_options(struct afs_mount_params *params,
                        *devname = args[0].from;
                        break;
 
+               case afs_opt_autocell:
+                       params->autocell = 1;
+                       break;
+
                default:
                        printk(KERN_ERR "kAFS:"
                               " Unknown or invalid mount option: '%s'\n", p);
@@ -252,10 +261,10 @@ static int afs_parse_device_name(struct afs_mount_params *params,
 
        /* lookup the cell record */
        if (cellname || !params->cell) {
-               cell = afs_cell_lookup(cellname, cellnamesz);
+               cell = afs_cell_lookup(cellname, cellnamesz, true);
                if (IS_ERR(cell)) {
-                       printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n",
-                              cellname ?: "");
+                       printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
+                              cellnamesz, cellnamesz, cellname ?: "");
                        return PTR_ERR(cell);
                }
                afs_put_cell(params->cell);
@@ -321,6 +330,9 @@ static int afs_fill_super(struct super_block *sb, void *data)
        if (IS_ERR(inode))
                goto error_inode;
 
+       if (params->autocell)
+               set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
+
        ret = -ENOMEM;
        root = d_alloc_root(inode);
        if (!root)
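
A hedged usage sketch of the new "autocell" option; the device name and mountpoint follow common kAFS examples and are not taken from this patch.

/*
 * Illustrative only:
 *
 *   mount -t afs "#root.afs." /afs -o autocell
 *
 * With "autocell" given, afs_fill_super() above sets
 * AFS_VNODE_AUTOCELL on the root inode, allowing cell directories to
 * be instantiated on demand.
 */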
index a7081eeeb85d08729588f653469a7bf3f1dac8e2..7099a526f775d24325c7d2c6ebd213faf1d6d87c 100644 (file)
@@ -301,6 +301,16 @@ A partial list of the supported mount options follows:
   gid          Set the default gid for inodes (similar to above).
   file_mode     If CIFS Unix extensions are not supported by the server
                this overrides the default mode for file inodes.
+  fsc          Enable local disk caching using FS-Cache (off by default). This
+               option can improve performance over a slow link or with a
+               heavily loaded server or network, where reading from the
+               local disk cache is faster than reading from the server
+               over the network. It can also improve scalability, since
+               the number of calls to the server is reduced. However,
+               local caching is not suitable for all workloads, e.g.
+               read-once workloads, so consider your workload carefully
+               before using this option. Currently, local
+               disk caching is functional for CIFS files opened as read-only.
   dir_mode      If CIFS Unix extensions are not supported by the server 
                this overrides the default mode for directory inodes.
   port         attempt to contact the server on this tcp port, before
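
A hedged usage sketch for the fsc option documented above; the server, share, and user names are invented.

/*
 * Illustrative only:
 *
 *   mount -t cifs //server/share /mnt/cifs -o user=alice,fsc
 *
 * A kernel built with FS-Cache/CIFS caching support and a running
 * cache daemon (e.g. cachefilesd) are assumed.
 */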
index 2fc3b3c08911916b7a8740c6734a300a0101cf23..edecd36fed9bdcf7f49411a3bcf2f97283bd077b 100644 (file)
@@ -230,15 +230,6 @@ static void __fput(struct file *file)
        might_sleep();
 
        fsnotify_close(file);
-
-       /*
-        * fsnotify_create_event may have taken one or more references on this
-        * file.  If it did so it left one reference for us to drop to make sure
-        * its calls to fput could not prematurely destroy the file.
-        */
-       if (atomic_long_read(&file->f_count))
-               return fput(file);
-
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
index cc1bb33b59b8d919b64c3b8c2895c3b08c6d80f5..26a510a7be0908023eaa5848002eced097f6e9dc 100644 (file)
@@ -100,3 +100,20 @@ config NFS_FSCACHE
        help
          Say Y here if you want NFS data to be cached locally on disc through
          the general filesystem cache manager
+
+config NFS_USE_LEGACY_DNS
+       bool "Use the legacy NFS DNS resolver"
+       depends on NFS_V4
+       help
+         The kernel now provides a method for translating a host name into an
+         IP address.  Select Y here if you would rather use your own DNS
+         resolver script.
+
+         If unsure, say N
+
+config NFS_USE_KERNEL_DNS
+       bool
+       depends on NFS_V4 && !NFS_USE_LEGACY_DNS
+       select DNS_RESOLVER
+       select KEYS
+       default y
index 76fd235d0024e1970d44044536d2d929c02b9256..dba50a5625db7987d1e4a757063f3a5412ff5928 100644 (file)
@@ -6,6 +6,29 @@
  * Resolves DNS hostnames into valid ip addresses
  */
 
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/dns_resolver.h>
+
+ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
+               struct sockaddr *sa, size_t salen)
+{
+       ssize_t ret;
+       char *ip_addr = NULL;
+       int ip_len;
+
+       ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
+       if (ip_len > 0)
+               ret = rpc_pton(ip_addr, ip_len, sa, salen);
+       else
+               ret = -ESRCH;
+       kfree(ip_addr);
+       return ret;
+}
+
+#else
+
 #include <linux/hash.h>
 #include <linux/string.h>
 #include <linux/kmod.h>
@@ -346,3 +369,4 @@ void nfs_dns_resolver_destroy(void)
        nfs_cache_unregister(&nfs_dns_resolve);
 }
 
+#endif
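
A minimal hedged caller sketch for the interface above; every name except nfs_dns_resolve_name() is invented.

/*
 * Hedged caller sketch, not from the patch.
 */
static int example_resolve(struct sockaddr_storage *ss)
{
	char host[] = "nfs.example.org";	/* invented host name */
	ssize_t len;

	len = nfs_dns_resolve_name(host, sizeof(host) - 1,
				   (struct sockaddr *)ss, sizeof(*ss));
	if (len < 0)
		return len;	/* e.g. -ESRCH when nothing was found */

	/* on success, len is the size of the sockaddr written to *ss */
	return 0;
}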
index a3f0938babf7685360fcb43bf84e29c8fcfc8e5b..199bb5543a91ad3dfc1e03e2f85894f78661e4cb 100644 (file)
@@ -6,8 +6,20 @@
 
 #define NFS_DNS_HOSTNAME_MAXLEN        (128)
 
+
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+static inline int nfs_dns_resolver_init(void)
+{
+       return 0;
+}
+
+static inline void nfs_dns_resolver_destroy(void)
+{}
+#else
 extern int nfs_dns_resolver_init(void);
 extern void nfs_dns_resolver_destroy(void);
+#endif
+
 extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
                struct sockaddr *sa, size_t salen);
 
index eb8f73c9c131ffaf443aec44d45771fac8cfb483..756566fe844909c2499f3e5a53903fd2f62f2419 100644 (file)
@@ -17,9 +17,9 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
            old->data_type == new->data_type &&
            old->tgid == new->tgid) {
                switch (old->data_type) {
-               case (FSNOTIFY_EVENT_FILE):
-                       if ((old->file->f_path.mnt == new->file->f_path.mnt) &&
-                           (old->file->f_path.dentry == new->file->f_path.dentry))
+               case (FSNOTIFY_EVENT_PATH):
+                       if ((old->path.mnt == new->path.mnt) &&
+                           (old->path.dentry == new->path.dentry))
                                return true;
                case (FSNOTIFY_EVENT_NONE):
                        return true;
@@ -174,7 +174,7 @@ static bool fanotify_should_send_event(struct fsnotify_group *group,
                return false;
 
        /* if we don't have enough info to send an event to userspace say no */
-       if (data_type != FSNOTIFY_EVENT_FILE)
+       if (data_type != FSNOTIFY_EVENT_PATH)
                return false;
 
        if (inode_mark && vfsmnt_mark) {
index 25a3b4dfcf61286c4eddceddbfbf440863bc384c..032b837fcd11912e54a01920ddf40b9506f1c8f9 100644 (file)
@@ -65,7 +65,7 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
        if (client_fd < 0)
                return client_fd;
 
-       if (event->data_type != FSNOTIFY_EVENT_FILE) {
+       if (event->data_type != FSNOTIFY_EVENT_PATH) {
                WARN_ON(1);
                put_unused_fd(client_fd);
                return -EINVAL;
@@ -75,8 +75,8 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
         * we need a new file handle for the userspace program so it can read even if it was
         * originally opened O_WRONLY.
         */
-       dentry = dget(event->file->f_path.dentry);
-       mnt = mntget(event->file->f_path.mnt);
+       dentry = dget(event->path.dentry);
+       mnt = mntget(event->path.mnt);
        /* it's possible this event was an overflow event.  In that case
         * dentry and mnt are NULL; that's fine, just don't call dentry_open() */
        if (dentry && mnt)
index 4d2a82c1ceb1bfa2574f5af86d290a0795742254..3970392b272264810574ca6536eed74bd3fc013d 100644 (file)
@@ -84,7 +84,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
 }
 
 /* Notify this dentry's parent about a child's events. */
-void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
+void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
 {
        struct dentry *parent;
        struct inode *p_inode;
@@ -92,7 +92,7 @@ void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
        bool should_update_children = false;
 
        if (!dentry)
-               dentry = file->f_path.dentry;
+               dentry = path->dentry;
 
        if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
                return;
@@ -124,8 +124,8 @@ void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
                 * specifies these are events which came from a child. */
                mask |= FS_EVENT_ON_CHILD;
 
-               if (file)
-                       fsnotify(p_inode, mask, file, FSNOTIFY_EVENT_FILE,
+               if (path)
+                       fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
                                 dentry->d_name.name, 0);
                else
                        fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
@@ -217,8 +217,8 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
        /* global tests shouldn't care about events on a child, only the specific event */
        __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
 
-       if (data_is == FSNOTIFY_EVENT_FILE)
-               mnt = ((struct file *)data)->f_path.mnt;
+       if (data_is == FSNOTIFY_EVENT_PATH)
+               mnt = ((struct path *)data)->mnt;
        else
                mnt = NULL;
 
index 5e73eeb2c69721fa799ef358c9f6dd1142b9c5fb..a91b69a6a291be3e29082a69c8697cb049d5992c 100644 (file)
@@ -52,9 +52,9 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
                            !strcmp(old->file_name, new->file_name))
                                return true;
                        break;
-               case (FSNOTIFY_EVENT_FILE):
-                       if ((old->file->f_path.mnt == new->file->f_path.mnt) &&
-                           (old->file->f_path.dentry == new->file->f_path.dentry))
+               case (FSNOTIFY_EVENT_PATH):
+                       if ((old->path.mnt == new->path.mnt) &&
+                           (old->path.dentry == new->path.dentry))
                                return true;
                        break;
                case (FSNOTIFY_EVENT_NONE):
@@ -147,10 +147,10 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
                                      __u32 mask, void *data, int data_type)
 {
        if ((inode_mark->mask & FS_EXCL_UNLINK) &&
-           (data_type == FSNOTIFY_EVENT_FILE)) {
-               struct file *file  = data;
+           (data_type == FSNOTIFY_EVENT_PATH)) {
+               struct path *path = data;
 
-               if (d_unlinked(file->f_path.dentry))
+               if (d_unlinked(path->dentry))
                        return false;
        }
 
index d6c435adc7a2140d491d25ae3f91156b96da3bc1..f39260f8f8656e42feac01e728497cd05650d881 100644 (file)
@@ -31,7 +31,6 @@
  * allocated and used.
  */
 
-#include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -90,8 +89,8 @@ void fsnotify_put_event(struct fsnotify_event *event)
        if (atomic_dec_and_test(&event->refcnt)) {
                pr_debug("%s: event=%p\n", __func__, event);
 
-               if (event->data_type == FSNOTIFY_EVENT_FILE)
-                       fput(event->file);
+               if (event->data_type == FSNOTIFY_EVENT_PATH)
+                       path_put(&event->path);
 
                BUG_ON(!list_empty(&event->private_data_list));
 
@@ -376,8 +375,8 @@ struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
                }
        }
        event->tgid = get_pid(old_event->tgid);
-       if (event->data_type == FSNOTIFY_EVENT_FILE)
-               get_file(event->file);
+       if (event->data_type == FSNOTIFY_EVENT_PATH)
+               path_get(&event->path);
 
        return event;
 }
@@ -424,22 +423,11 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
        event->data_type = data_type;
 
        switch (data_type) {
-       case FSNOTIFY_EVENT_FILE: {
-               event->file = data;
-               /*
-                * if this file is about to disappear hold an extra reference
-                * until we return to __fput so we don't have to worry about
-                * future get/put destroying the file under us or generating
-                * additional events.  Notice that we change f_mode without
-                * holding f_lock.  This is safe since this is the only possible
-                * reference to this object in the kernel (it was about to be
-                * freed, remember?)
-                */
-               if (!atomic_long_read(&event->file->f_count)) {
-                       event->file->f_mode |= FMODE_NONOTIFY;
-                       get_file(event->file);
-               }
-               get_file(event->file);
+       case FSNOTIFY_EVENT_PATH: {
+               struct path *path = data;
+               event->path.dentry = path->dentry;
+               event->path.mnt = path->mnt;
+               path_get(&event->path);
                break;
        }
        case FSNOTIFY_EVENT_INODE:
@@ -447,7 +435,8 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
                break;
        case FSNOTIFY_EVENT_NONE:
                event->inode = NULL;
-               event->file = NULL;
+               event->path.dentry = NULL;
+               event->path.mnt = NULL;
                break;
        default:
                BUG();
index 848480bc2bf93846b14e43c0c94493cb2109f889..2308fbb4523a399abce0809860f6f398c8844ed5 100644 (file)
@@ -129,7 +129,7 @@ static inline void random_ether_addr(u8 *addr)
 /**
  * dev_hw_addr_random - Create random MAC and set device flag
  * @dev: pointer to net_device structure
- * @addr: Pointer to a six-byte array containing the Ethernet address
+ * @hwaddr: Pointer to a six-byte array containing the Ethernet address
  *
  * Generate random MAC to be used by a device and set addr_assign_type
  * so the state can be read by sysfs and be used by udev.
index e4e2204187ee2ae12e8eb0601e0fc078af68bb51..59d0df43ff9d5ef649fd109228ded45f37579d84 100644 (file)
@@ -26,18 +26,19 @@ static inline void fsnotify_d_instantiate(struct dentry *dentry,
 }
 
 /* Notify this dentry's parent about a child's events. */
-static inline void fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
+static inline void fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
 {
        if (!dentry)
-               dentry = file->f_path.dentry;
+               dentry = path->dentry;
 
-       __fsnotify_parent(file, dentry, mask);
+       __fsnotify_parent(path, dentry, mask);
 }
 
 /* simple call site for access decisions */
 static inline int fsnotify_perm(struct file *file, int mask)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct path *path = &file->f_path;
+       struct inode *inode = path->dentry->d_inode;
        __u32 fsnotify_mask = 0;
 
        if (file->f_mode & FMODE_NONOTIFY)
@@ -51,7 +52,7 @@ static inline int fsnotify_perm(struct file *file, int mask)
        else
                BUG();
 
-       return fsnotify(inode, fsnotify_mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+       return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
 }
 
 /*
@@ -186,15 +187,16 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
  */
 static inline void fsnotify_access(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct path *path = &file->f_path;
+       struct inode *inode = path->dentry->d_inode;
        __u32 mask = FS_ACCESS;
 
        if (S_ISDIR(inode->i_mode))
                mask |= FS_IN_ISDIR;
 
        if (!(file->f_mode & FMODE_NONOTIFY)) {
-               fsnotify_parent(file, NULL, mask);
-               fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+               fsnotify_parent(path, NULL, mask);
+               fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
        }
 }
 
@@ -203,15 +205,16 @@ static inline void fsnotify_access(struct file *file)
  */
 static inline void fsnotify_modify(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct path *path = &file->f_path;
+       struct inode *inode = path->dentry->d_inode;
        __u32 mask = FS_MODIFY;
 
        if (S_ISDIR(inode->i_mode))
                mask |= FS_IN_ISDIR;
 
        if (!(file->f_mode & FMODE_NONOTIFY)) {
-               fsnotify_parent(file, NULL, mask);
-               fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+               fsnotify_parent(path, NULL, mask);
+               fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
        }
 }
 
@@ -220,15 +223,16 @@ static inline void fsnotify_modify(struct file *file)
  */
 static inline void fsnotify_open(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct path *path = &file->f_path;
+       struct inode *inode = path->dentry->d_inode;
        __u32 mask = FS_OPEN;
 
        if (S_ISDIR(inode->i_mode))
                mask |= FS_IN_ISDIR;
 
        if (!(file->f_mode & FMODE_NONOTIFY)) {
-               fsnotify_parent(file, NULL, mask);
-               fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+               fsnotify_parent(path, NULL, mask);
+               fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
        }
 }
 
@@ -237,6 +241,7 @@ static inline void fsnotify_open(struct file *file)
  */
 static inline void fsnotify_close(struct file *file)
 {
+       struct path *path = &file->f_path;
        struct inode *inode = file->f_path.dentry->d_inode;
        fmode_t mode = file->f_mode;
        __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
@@ -245,8 +250,8 @@ static inline void fsnotify_close(struct file *file)
                mask |= FS_IN_ISDIR;
 
        if (!(file->f_mode & FMODE_NONOTIFY)) {
-               fsnotify_parent(file, NULL, mask);
-               fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+               fsnotify_parent(path, NULL, mask);
+               fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
        }
 }
 
index 9bbfd7204b04aff0fe6a06b34734df452b12b773..ed36fb57c426b13ce2b9ec3df0574779607949f5 100644 (file)
@@ -203,20 +203,20 @@ struct fsnotify_event {
        /* to_tell may ONLY be dereferenced during handle_event(). */
        struct inode *to_tell;  /* either the inode the event happened to or its parent */
        /*
-        * depending on the event type we should have either a file or inode
-        * We hold a reference on file, but NOT on inode.  Since we have the ref on
-        * the file, it may be dereferenced at any point during this object's
+        * depending on the event type we should have either a path or an inode.
+        * We hold a reference on the path, but NOT on the inode.  Since we have the ref on
+        * the path, it may be dereferenced at any point during this object's
         * lifetime.  That reference is dropped when this object's refcnt hits
-        * 0.  If this event contains an inode instead of a file, the inode may
+        * 0.  If this event contains an inode instead of a path, the inode may
         * ONLY be used during handle_event().
         */
        union {
-               struct file *file;
+               struct path path;
                struct inode *inode;
        };
 /* when calling fsnotify tell it if the data is a path or inode */
 #define FSNOTIFY_EVENT_NONE    0
-#define FSNOTIFY_EVENT_FILE    1
+#define FSNOTIFY_EVENT_PATH    1
 #define FSNOTIFY_EVENT_INODE   2
        int data_type;          /* which of the above union we have */
        atomic_t refcnt;        /* how many groups still are using/need to send this event */
@@ -293,7 +293,7 @@ struct fsnotify_mark {
 /* main fsnotify call to send events */
 extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
                    const unsigned char *name, u32 cookie);
-extern void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask);
+extern void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
 extern void __fsnotify_inode_delete(struct inode *inode);
 extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
 extern u32 fsnotify_get_cookie(void);
@@ -422,7 +422,7 @@ static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int da
        return 0;
 }
 
-static inline void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask)
+static inline void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
 {}
 
 static inline void __fsnotify_inode_delete(struct inode *inode)
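
To summarize the conversion in this patch, a hedged illustration of how data and data_is now pair up (shapes taken from the converted call sites above):

/*
 *   fsnotify(inode, mask, path,  FSNOTIFY_EVENT_PATH,  name, cookie);
 *   fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, name, cookie);
 *
 * A struct path can be pinned with path_get()/path_put(), so an event
 * may hold it for its whole lifetime, whereas a bare inode may only be
 * used during handle_event().
 */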
index 413742c92d14efce0e98743a101a67fb3e8c2f97..791d5109f34c12207de65f06cca05fa4b35b44b8 100644 (file)
@@ -122,7 +122,7 @@ static inline int netpoll_tx_running(struct net_device *dev)
 }
 
 #else
-static inline int netpoll_rx(struct sk_buff *skb)
+static inline bool netpoll_rx(struct sk_buff *skb)
 {
        return 0;
 }
index 636724b203eef0f2c6fa519fe626df2a707029d3..6c241444f902800baf1c4cd16aa89d767d751065 100644 (file)
@@ -33,9 +33,9 @@
 #define L2CAP_DEFAULT_FLUSH_TO         0xffff
 #define L2CAP_DEFAULT_TX_WINDOW                63
 #define L2CAP_DEFAULT_MAX_TX           3
-#define L2CAP_DEFAULT_RETRANS_TO       1000    /* 1 second */
+#define L2CAP_DEFAULT_RETRANS_TO       2000    /* 2 seconds */
 #define L2CAP_DEFAULT_MONITOR_TO       12000   /* 12 seconds */
-#define L2CAP_DEFAULT_MAX_PDU_SIZE     672
+#define L2CAP_DEFAULT_MAX_PDU_SIZE     1009    /* Sized for 3-DH5 packet */
 #define L2CAP_DEFAULT_ACK_TO           200
 #define L2CAP_LOCAL_BUSY_TRIES         12
 
index a441c9cdd62540cd7100189c5fa7aea2f6b0b22b..ac53bfbdfe16b57038cf6c0b7f88cc88f5221594 100644 (file)
@@ -195,7 +195,8 @@ struct sock_common {
   *    @sk_priority: %SO_PRIORITY setting
   *    @sk_type: socket type (%SOCK_STREAM, etc)
   *    @sk_protocol: which protocol this socket belongs in this network family
-  *    @sk_peercred: %SO_PEERCRED setting
+  *    @sk_peer_pid: &struct pid for this socket's peer
+  *    @sk_peer_cred: %SO_PEERCRED setting
   *    @sk_rcvlowat: %SO_RCVLOWAT setting
   *    @sk_rcvtimeo: %SO_RCVTIMEO setting
   *    @sk_sndtimeo: %SO_SNDTIMEO setting
@@ -211,6 +212,7 @@ struct sock_common {
   *    @sk_send_head: front of stuff to transmit
   *    @sk_security: used by security modules
   *    @sk_mark: generic packet mark
+  *    @sk_classid: this socket's cgroup classid
   *    @sk_write_pending: a write to stream socket waits to start
   *    @sk_state_change: callback to indicate change in the state of the sock
   *    @sk_data_ready: callback to indicate there is data to be processed
index 6bf2306be7d69510b3feda529c74b5a0c8496013..f0c9b2e7542dfd5b2980164a2f5ccedbcd2bb0d1 100644 (file)
@@ -526,8 +526,8 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
        BUG_ON(group != audit_watch_group);
 
        switch (event->data_type) {
-       case (FSNOTIFY_EVENT_FILE):
-               inode = event->file->f_path.dentry->d_inode;
+       case (FSNOTIFY_EVENT_PATH):
+               inode = event->path.dentry->d_inode;
                break;
        case (FSNOTIFY_EVENT_INODE):
                inode = event->inode;
index 858829d06a924291190c159614bd7f9dbbf374d1..9b3b73f4ae9c77350abd052abbdc889634258801 100644 (file)
@@ -2759,6 +2759,26 @@ out_release:
        return ret;
 }
 
+/*
+ * This is like a special single-page "expand_downwards()",
+ * except we must first make sure that 'address-PAGE_SIZE'
+ * doesn't hit another vma.
+ *
+ * The "find_vma()" will do the right thing even if we wrap
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+       address &= PAGE_MASK;
+       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+               address -= PAGE_SIZE;
+               if (find_vma(vma->vm_mm, address) != vma)
+                       return -ENOMEM;
+
+               expand_stack(vma, address);
+       }
+       return 0;
+}
+
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2772,6 +2792,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
+       if (check_stack_guard_page(vma, address) < 0) {
+               pte_unmap(page_table);
+               return VM_FAULT_SIGBUS;
+       }
+
        if (!(flags & FAULT_FLAG_WRITE)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                                vma->vm_page_prot));
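
A worked example of the guard-page check added above; the addresses are invented.

/*
 * For a VM_GROWSDOWN stack VMA with vm_start == 0xbf800000, a fault at
 * 0xbf800004 masks to exactly vm_start, so the code probes 0xbf7ff000.
 * If find_vma() still returns this VMA (no neighbouring VMA owns that
 * page), the stack is grown by one page before the anonymous fault is
 * serviced; otherwise the fault fails with VM_FAULT_SIGBUS.
 */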
index 3e3cd9d4e52ce6147ed5b506a8d4cb3f6248d85e..fadf26b4ed7c432eba09800b4532683fc44cc02d 100644 (file)
@@ -2705,8 +2705,9 @@ done:
                case L2CAP_MODE_ERTM:
                        pi->remote_tx_win = rfc.txwin_size;
                        pi->remote_max_tx = rfc.max_transmit;
-                       if (rfc.max_pdu_size > pi->conn->mtu - 10)
-                               rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
+
+                       if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+                               rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
 
                        pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
 
@@ -2723,8 +2724,8 @@ done:
                        break;
 
                case L2CAP_MODE_STREAMING:
-                       if (rfc.max_pdu_size > pi->conn->mtu - 10)
-                               rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
+                       if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
+                               rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
 
                        pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
 
@@ -2806,7 +2807,6 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
        if (*result == L2CAP_CONF_SUCCESS) {
                switch (rfc.mode) {
                case L2CAP_MODE_ERTM:
-                       pi->remote_tx_win   = rfc.txwin_size;
                        pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
                        pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
                        pi->mps    = le16_to_cpu(rfc.max_pdu_size);
@@ -2862,7 +2862,6 @@ static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
 done:
        switch (rfc.mode) {
        case L2CAP_MODE_ERTM:
-               pi->remote_tx_win   = rfc.txwin_size;
                pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
                pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
                pi->mps    = le16_to_cpu(rfc.max_pdu_size);
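
A hedged note on the endianness fixes above.

/*
 * rfc.max_pdu_size is carried in wire (little-endian) order, so it is
 * compared via le16_to_cpu() against the host-order MTU, and a clamped
 * value is stored back with cpu_to_le16() before the RFC option is
 * echoed to the peer.
 */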
index 01f238ff23466912ad1c243b6ca88b8a0c998f8b..c49a6695793ac8361b072077aefa4e2b863e8f5c 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/hardirq.h>
 #include <net/caif/cfpkt.h>
 
-#define PKT_PREFIX  16
+#define PKT_PREFIX  48
 #define PKT_POSTFIX 2
 #define PKT_LEN_WHEN_EXTENDING 128
 #define PKT_ERROR(pkt, errmsg) do {       \
index 9c65e9deb9c3ff6d6f958f4b52ee7e85d6299935..08ffe9e4be20aa49ab2bba82c36c5e988a4880fd 100644 (file)
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
+/*
+ * To send the content of multiple CAN frames with TX_SETUP, or to
+ * filter CAN messages by multiplex index with RX_SETUP, the number of
+ * different filters is limited to 256 due to the one-byte index value.
+ */
+#define MAX_NFRAMES 256
+
 /* use of last_frames[index].can_dlc */
 #define RX_RECV    0x40 /* received data for this element */
 #define RX_THR     0x80 /* element not been sent due to throttle feature */
@@ -89,16 +96,16 @@ struct bcm_op {
        struct list_head list;
        int ifindex;
        canid_t can_id;
-       int flags;
+       u32 flags;
        unsigned long frames_abs, frames_filtered;
        struct timeval ival1, ival2;
        struct hrtimer timer, thrtimer;
        struct tasklet_struct tsklet, thrtsklet;
        ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
        int rx_ifindex;
-       int count;
-       int nframes;
-       int currframe;
+       u32 count;
+       u32 nframes;
+       u32 currframe;
        struct can_frame *frames;
        struct can_frame *last_frames;
        struct can_frame sframe;
@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
                seq_printf(m, "rx_op: %03X %-5s ",
                                op->can_id, bcm_proc_getifname(ifname, op->ifindex));
-               seq_printf(m, "[%d]%c ", op->nframes,
+               seq_printf(m, "[%u]%c ", op->nframes,
                                (op->flags & RX_CHECK_DLC)?'d':' ');
                if (op->kt_ival1.tv64)
                        seq_printf(m, "timeo=%lld ",
@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
        list_for_each_entry(op, &bo->tx_ops, list) {
 
-               seq_printf(m, "tx_op: %03X %s [%d] ",
+               seq_printf(m, "tx_op: %03X %s [%u] ",
                                op->can_id,
                                bcm_proc_getifname(ifname, op->ifindex),
                                op->nframes);
@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
        struct can_frame *firstframe;
        struct sockaddr_can *addr;
        struct sock *sk = op->sk;
-       int datalen = head->nframes * CFSIZ;
+       unsigned int datalen = head->nframes * CFSIZ;
        int err;
 
        skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -468,7 +475,7 @@ rx_changed_settime:
  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
  *                       received data stored in op->last_frames[]
  */
-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
                                const struct can_frame *rxdata)
 {
        /*
@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 /*
  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
  */
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+                                 unsigned int index)
 {
        if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
                if (update)
@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
        int updated = 0;
 
        if (op->nframes > 1) {
-               int i;
+               unsigned int i;
 
                /* for MUX filter we start at index 1 */
                for (i = 1; i < op->nframes; i++)
@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
        struct bcm_op *op = (struct bcm_op *)data;
        const struct can_frame *rxframe = (struct can_frame *)skb->data;
-       int i;
+       unsigned int i;
 
        /* disable timeout */
        hrtimer_cancel(&op->timer);
@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 {
        struct bcm_sock *bo = bcm_sk(sk);
        struct bcm_op *op;
-       int i, err;
+       unsigned int i;
+       int err;
 
        /* we need a real device to send frames */
        if (!ifindex)
                return -ENODEV;
 
-       /* we need at least one can_frame */
-       if (msg_head->nframes < 1)
+       /* check nframes boundaries - we need at least one can_frame */
+       if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
                return -EINVAL;
 
        /* check the given can_id */
@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                msg_head->nframes = 0;
        }
 
+       /* the first element contains the mux-mask => MAX_NFRAMES + 1  */
+       if (msg_head->nframes > MAX_NFRAMES + 1)
+               return -EINVAL;
+
        if ((msg_head->flags & RX_RTR_FRAME) &&
            ((msg_head->nframes != 1) ||
             (!(msg_head->can_id & CAN_RTR_FLAG))))
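
A hedged illustration of the bounds enforced above.

/*
 * TX_SETUP accepts 1..MAX_NFRAMES (256) frames; RX_SETUP accepts up to
 * MAX_NFRAMES + 1 (257) elements because element 0 carries the
 * multiplex mask rather than frame content.
 */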
index 400a04d5c9a1335216f6bebca3b2892ec99a3d6b..739435a6af3983fa2776a4b402a51d9d7aefeec8 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/keyctl.h>
 #include <linux/err.h>
+#include <linux/seq_file.h>
 #include <keys/dns_resolver-type.h>
 #include <keys/user-type.h>
 #include "internal.h"
@@ -43,6 +44,8 @@ MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
 
 const struct cred *dns_resolver_cache;
 
+#define        DNS_ERRORNO_OPTION      "dnserror"
+
 /*
  * Instantiate a user defined key for dns_resolver.
  *
@@ -59,9 +62,10 @@ static int
 dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
 {
        struct user_key_payload *upayload;
+       unsigned long derrno;
        int ret;
        size_t result_len = 0;
-       const char *data = _data, *opt;
+       const char *data = _data, *end, *opt;
 
        kenter("%%%d,%s,'%s',%zu",
               key->serial, key->description, data, datalen);
@@ -71,13 +75,77 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
        datalen--;
 
        /* deal with any options embedded in the data */
+       end = data + datalen;
        opt = memchr(data, '#', datalen);
        if (!opt) {
-               kdebug("no options currently supported");
-               return -EINVAL;
+               /* no options: the entire data is the result */
+               kdebug("no options");
+               result_len = datalen;
+       } else {
+               const char *next_opt;
+
+               result_len = opt - data;
+               opt++;
+               kdebug("options: '%s'", opt);
+               do {
+                       const char *eq;
+                       int opt_len, opt_nlen, opt_vlen, tmp;
+
+                       next_opt = memchr(opt, '#', end - opt) ?: end;
+                       opt_len = next_opt - opt;
+                       if (!opt_len) {
+                               printk(KERN_WARNING
+                                      "Empty option to dns_resolver key %d\n",
+                                      key->serial);
+                               return -EINVAL;
+                       }
+
+                       eq = memchr(opt, '=', opt_len) ?: end;
+                       opt_nlen = eq - opt;
+                       eq++;
+                       opt_vlen = next_opt - eq; /* will be -1 if no value */
+
+                       tmp = opt_vlen >= 0 ? opt_vlen : 0;
+                       kdebug("option '%*.*s' val '%*.*s'",
+                              opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+
+                       /* see if it's an error number representing a DNS error
+                        * that's to be recorded as the result in this key */
+                       if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
+                           memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
+                               kdebug("dns error number option");
+                               if (opt_vlen <= 0)
+                                       goto bad_option_value;
+
+                               ret = strict_strtoul(eq, 10, &derrno);
+                               if (ret < 0)
+                                       goto bad_option_value;
+
+                               if (derrno < 1 || derrno > 511)
+                                       goto bad_option_value;
+
+                               kdebug("dns error no. = %lu", derrno);
+                               key->type_data.x[0] = -derrno;
+                               continue;
+                       }
+
+               bad_option_value:
+                       printk(KERN_WARNING
+                              "Option '%*.*s' to dns_resolver key %d:"
+                              " bad/missing value\n",
+                              opt_nlen, opt_nlen, opt, key->serial);
+                       return -EINVAL;
+               } while (opt = next_opt + 1, opt < end);
+       }
+
+       /* don't cache the result if we're caching an error saying there's no
+        * result */
+       if (key->type_data.x[0]) {
+               kleave(" = 0 [h_error %ld]", key->type_data.x[0]);
+               return 0;
        }
 
-       result_len = datalen;
+       kdebug("store result");
        ret = key_payload_reserve(key, result_len);
        if (ret < 0)
                return -EINVAL;
@@ -135,13 +203,27 @@ no_match:
        return ret;
 }
 
+/*
+ * Describe a DNS key
+ */
+static void dns_resolver_describe(const struct key *key, struct seq_file *m)
+{
+       int err = key->type_data.x[0];
+
+       seq_puts(m, key->description);
+       if (err)
+               seq_printf(m, ": %d", err);
+       else
+               seq_printf(m, ": %u", key->datalen);
+}
+
 struct key_type key_type_dns_resolver = {
        .name           = "dns_resolver",
        .instantiate    = dns_resolver_instantiate,
        .match          = dns_resolver_match,
        .revoke         = user_revoke,
        .destroy        = user_destroy,
-       .describe       = user_describe,
+       .describe       = dns_resolver_describe,
        .read           = user_read,
 };
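
Hedged examples of the instantiation payload parsed above; the address is invented.

/*
 *   "202.96.0.1"       - plain result, no options
 *   "#dnserror=3"      - no result; DNS error 3 recorded in the key
 *
 * Options follow the result after '#' as name=value pairs, themselves
 * separated by '#'; "dnserror" is the only option recognized here.
 */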
 
index 03d5255f5cf2a08cd4a1efe8d4a57638d5c15f65..c32be292c7e382c4a2c600e9e3c559906ff9784b 100644 (file)
@@ -136,6 +136,11 @@ int dns_query(const char *type, const char *name, size_t namelen,
        if (ret < 0)
                goto put;
 
+       /* If the DNS server gave an error, return that to the caller */
+       ret = rkey->type_data.x[0];
+       if (ret)
+               goto put;
+
        upayload = rcu_dereference_protected(rkey->payload.data,
                                             lockdep_is_held(&rkey->sem));
        len = upayload->datalen;
index 11201784d29a32d6fdddd972a8348d3202de0ec9..87bb5f4de0e84601817a0f63733a725ebb478b03 100644 (file)
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
        bool "Distributed Switch Architecture support"
        default n
-       depends on EXPERIMENTAL && NET_ETHERNET && !S390
+       depends on EXPERIMENTAL && NETDEVICES && !S390
        select PHYLIB
        ---help---
          This allows you to use hardware switch chips that use
index b9e8c3b7d406aacd9cc2319ddbffe0440cc6c311..408eea7086aace341d1ed9812d956db35cfdfb86 100644 (file)
@@ -150,22 +150,34 @@ int register_qdisc(struct Qdisc_ops *qops)
        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
        if (qops->peek == NULL) {
-               if (qops->dequeue == NULL) {
+               if (qops->dequeue == NULL)
                        qops->peek = noop_qdisc_ops.peek;
-               } else {
-                       rc = -EINVAL;
-                       goto out;
-               }
+               else
+                       goto out_einval;
        }
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;
 
+       if (qops->cl_ops) {
+               const struct Qdisc_class_ops *cops = qops->cl_ops;
+
+               if (!(cops->get && cops->put && cops->walk && cops->leaf))
+                       goto out_einval;
+
+               if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
+                       goto out_einval;
+       }
+
        qops->next = NULL;
        *qp = qops;
        rc = 0;
 out:
        write_unlock(&qdisc_mod_lock);
        return rc;
+
+out_einval:
+       rc = -EINVAL;
+       goto out;
 }
 EXPORT_SYMBOL(register_qdisc);
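
A hedged sketch of the smallest Qdisc_class_ops table that passes the new checks; every callback name is invented.  The sch_sfq hunks below add exactly such stubs.

/* Hedged sketch; all example_* names are invented. */
static const struct Qdisc_class_ops example_class_ops = {
	.leaf		= example_leaf,		/* required */
	.get		= example_get,		/* required */
	.put		= example_put,		/* required */
	.walk		= example_walk,		/* required */
	.tcf_chain	= example_find_tcf,	/* optional, but if present... */
	.bind_tcf	= example_bind,		/* ...these two must be */
	.unbind_tcf	= example_put,		/* supplied as well */
};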
 
index e114f23d5eaeb189428fbf4d307669da80e4f255..3406627895298324fdd9d27186ad8c9c8d9a9964 100644 (file)
@@ -418,7 +418,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        ret = qdisc_enqueue(skb, flow->q);
-       if (ret != 0) {
+       if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
                if (net_xmit_drop_count(ret)) {
                        sch->qstats.drops++;
@@ -442,7 +442,7 @@ drop: __maybe_unused
         */
        if (flow == &p->link) {
                sch->q.qlen++;
-               return 0;
+               return NET_XMIT_SUCCESS;
        }
        tasklet_schedule(&p->task);
        return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
index 534f33231c17d83593f124fa62d110d630c2fc0d..201cbac2b32ce0ccd8375b066005b6ba69759ddd 100644 (file)
@@ -334,7 +334,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (++sch->q.qlen <= q->limit) {
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
-               return 0;
+               return NET_XMIT_SUCCESS;
        }
 
        sfq_drop(sch);
@@ -508,6 +508,11 @@ nla_put_failure:
        return -1;
 }
 
+static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       return NULL;
+}
+
 static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
 {
        return 0;
@@ -519,6 +524,10 @@ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
        return 0;
 }
 
+static void sfq_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
 static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
@@ -571,9 +580,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 }
 
 static const struct Qdisc_class_ops sfq_class_ops = {
+       .leaf           =       sfq_leaf,
        .get            =       sfq_get,
+       .put            =       sfq_put,
        .tcf_chain      =       sfq_find_tcf,
        .bind_tcf       =       sfq_bind,
+       .unbind_tcf     =       sfq_put,
        .dump           =       sfq_dump_class,
        .dump_stats     =       sfq_dump_class_stats,
        .walk           =       sfq_walk,
index 0991c640cd3e8f3e5ae836c44b16b9e27f743c7e..641a30d646356867b808ac86d7bac59901fe2729 100644 (file)
@@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                return qdisc_reshape_fail(skb, sch);
 
        ret = qdisc_enqueue(skb, q->qdisc);
-       if (ret != 0) {
+       if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
                        sch->qstats.drops++;
                return ret;
@@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
        sch->q.qlen++;
        sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
-       return 0;
+       return NET_XMIT_SUCCESS;
 }
 
 static unsigned int tbf_drop(struct Qdisc* sch)
index 807643bdcbac30817edecfb4185dd45826b750eb..feaabc103ce6a061e350ddb07e1a2faf5d1ee4bd 100644 (file)
@@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                __skb_queue_tail(&q->q, skb);
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
-               return 0;
+               return NET_XMIT_SUCCESS;
        }
 
        kfree_skb(skb);
index e74a1a2119d34a334702bc16106c43fefa2ffdbd..d1a3fb99fdf2caaa83e5b476dd3166c18f436161 100644 (file)
@@ -843,13 +843,19 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
                return -EINVAL;
        if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
                /* Verify that we are associated with the destination AP */
+               wdev_lock(wdev);
+
                if (!wdev->current_bss ||
                    memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
                           ETH_ALEN) != 0 ||
                    (wdev->iftype == NL80211_IFTYPE_STATION &&
                     memcmp(wdev->current_bss->pub.bssid, mgmt->da,
-                           ETH_ALEN) != 0))
+                           ETH_ALEN) != 0)) {
+                       wdev_unlock(wdev);
                        return -ENOTCONN;
+               }
+
+               wdev_unlock(wdev);
        }
 
        if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)